import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin

enable_full_determinism()


@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

"""Project Euler Problem 101: https://projecteuler.net/problem=101"""

from __future__ import annotations

from collections.abc import Callable

Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """
    Solve the simultaneous equations matrix * x = vector using Gaussian
    elimination with partial pivoting, returning x as a column vector.
    """
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # Build the augmented matrix [matrix | vector].
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]

        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """
    Fit a polynomial exactly through the points (1, y_list[0]), (2, y_list[1]), ...
    and return a function that evaluates it at an integer argument.
    """
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)

        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating polynomial u(n) as specified in the question."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """
    For each polynomial fitted to an increasing prefix of the sequence generated
    by `func`, find its first incorrect term (FIT) and return the sum of those terms.
    """
    data_points = [func(x_val) for x_val in range(1, order + 1)]

    polynomials = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1

        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
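
# Added usage sketch (sample values are illustrative, not part of the original
# solution): `interpolate` reproduces a polynomial exactly from its samples,
# and `solve` handles a small linear system directly.
def _demo_interpolation() -> None:
    quadratic = interpolate([1, 4, 9])  # y = x**2 sampled at x = 1, 2, 3
    assert quadratic(4) == 16
    # 2x + y = 5 and x + 3y = 10 has the solution x = 1, y = 3.
    assert solve([[2.0, 1.0], [1.0, 3.0]], [[5.0], [10.0]]) == [[1.0], [3.0]]
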
"""OpenAI ImageGPT configuration"""

from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, TensorType

logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for the start-of-sequence token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        """Generate dummy image inputs to provide to the ONNX exporter."""
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
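
# Added quick check (illustrative values): `attribute_map` lets the generic
# config names used across transformers resolve to the GPT-style fields.
def _demo_imagegpt_config() -> None:
    config = ImageGPTConfig(n_embd=256, n_layer=4)
    assert config.hidden_size == 256  # proxied to n_embd via attribute_map
    assert config.num_hidden_layers == 4  # proxied to n_layer
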
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image

if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }


def pt_to_pil(images):
    """Convert a torch image tensor in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch in [0, 1] to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
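
# Added usage sketch: a random tensor stands in for a decoded VAE batch in
# [-1, 1]; `pt_to_pil` converts it to a list of PIL images.
def _demo_pt_to_pil() -> None:
    import torch

    fake_batch = torch.rand(2, 3, 64, 64) * 2 - 1  # stand-in for decoded latents
    pil_images = pt_to_pil(fake_batch)
    assert len(pil_images) == 2
    assert pil_images[0].size == (64, 64)
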
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
__lowerCAmelCase = str(bin(_UpperCamelCase ) )[2:] # remove the leading "0b"
__lowerCAmelCase = str(bin(_UpperCamelCase ) )[2:]
__lowerCAmelCase = max(len(_UpperCamelCase ) , len(_UpperCamelCase ) )
return "0b" + "".join(
str(int("1" in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(_UpperCamelCase ) , b_binary.zfill(_UpperCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : List[str] ="""Speech2TextFeatureExtractor"""
__UpperCAmelCase : Union[str, Any] ="""Speech2TextTokenizer"""
def __init__( self , __a , __a ):
super().__init__(__a , __a )
__lowerCAmelCase = self.feature_extractor
__lowerCAmelCase = False
def __call__( self , *__a , **__a ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*__a , **__a )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
__lowerCAmelCase = kwargs.pop("raw_speech" )
else:
__lowerCAmelCase = kwargs.pop("audio" , __a )
__lowerCAmelCase = kwargs.pop("sampling_rate" , __a )
__lowerCAmelCase = kwargs.pop("text" , __a )
if len(__a ) > 0:
__lowerCAmelCase = args[0]
__lowerCAmelCase = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
__lowerCAmelCase = self.feature_extractor(__a , *__a , sampling_rate=__a , **__a )
if text is not None:
__lowerCAmelCase = self.tokenizer(__a , **__a )
if text is None:
return inputs
elif audio is None:
return encodings
else:
__lowerCAmelCase = encodings["input_ids"]
return inputs
def snake_case ( self , *__a , **__a ):
return self.tokenizer.batch_decode(*__a , **__a )
def snake_case ( self , *__a , **__a ):
return self.tokenizer.decode(*__a , **__a )
@contextmanager
def snake_case ( self ):
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
__lowerCAmelCase = True
__lowerCAmelCase = self.tokenizer
yield
__lowerCAmelCase = self.feature_extractor
__lowerCAmelCase = False
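
# Added usage sketch. The checkpoint name is a real Speech2Text checkpoint, but
# the zero waveform below is only a stand-in for actual audio.
def _demo_speech2text_processor():
    import numpy as np

    from transformers import Speech2TextProcessor

    processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
    waveform = np.zeros(16_000, dtype=np.float32)  # 1 second of silence at 16 kHz
    inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
    labels = processor(text="a transcription", return_tensors="pt").input_ids
    return inputs, labels
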
import unittest

import torch

from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin, UNetTesterMixin

enable_full_determinism()


class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
from typing import TYPE_CHECKING

from ....utils import _LazyModule

_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import warnings
from pathlib import Path
from typing import List, Tuple, Union

import fire
from torch import nn

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging

logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())


LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}


def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    """Used for the --supervise_forward kwarg."""
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]


def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    """Make a student by copying alternating layers from a teacher, and save it to save_path."""
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict; this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    # Save information about copying for easier reproducibility
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)

    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
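
# Added example invocation (the teacher checkpoint is real; the save path and
# layer counts are arbitrary). bart-large-cnn has 12 encoder and 12 decoder
# layers, so e=3, d=3 copies teacher layers [0, 6, 11] per LAYERS_TO_COPY[12][3].
def _demo_make_student():
    student, e_copied, d_copied = create_student_by_copying_alternating_layers(
        "facebook/bart-large-cnn", save_path="student_bart_3_3", e=3, d=3
    )
    assert (e_copied, d_copied) == ([0, 6, 11], [0, 6, 11])
    return student
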
"""simple docstring"""
import qiskit
def _A ( _a : int , _a : int ):
"""simple docstring"""
A = qiskit.Aer.get_backend("""aer_simulator""" )
# Create a Quantum Circuit acting on the q register
A = qiskit.QuantumCircuit(_a , _a )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
A = qiskit.execute(_a , _a , shots=1_0_0_0 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_a )
if __name__ == "__main__":
UpperCAmelCase =single_qubit_measure(2, 2)
print(f"""Total count for various states are: {counts}""")
import json
import os
import shutil
import tempfile
import unittest

from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer

    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)

    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)

    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"

                additional_special_tokens = tokenizer.additional_special_tokens

                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)

                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)

    @require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = chr(NEW_TOKEN)

                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)

                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )

                NEW_TOKEN = 0xE007
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )

                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )

    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])

    # cannot use default `test_tokenizers_common_ids_setters` method because tokenizer has no vocab
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])

    # tokenizer has a fixed vocab_size (all possible unicode code points), so the
    # following common tests are not applicable and are skipped as no-ops:
    def test_add_tokens_tokenizer(self):
        pass

    def test_added_tokens_do_lower_case(self):
        pass

    def test_np_encode_plus_sent_to_model(self):
        pass

    def test_torch_encode_plus_sent_to_model(self):
        pass

    def test_pretrained_model_lists(self):
        pass

    def test_get_vocab(self):
        pass

    def test_pretokenized_inputs(self):
        pass

    def test_conversion_reversible(self):
        pass
from typing import TYPE_CHECKING

from ...utils import _LazyModule

_import_structure = {"tokenization_bertweet": ["BertweetTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_bertweet import BertweetTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
    """
    Search for `key` in `list_data` by checking both ends and recursing inward.
    Returns the index of `key` if found, -1 otherwise.

    >>> search(list(range(0, 11)), 5)
    5
    >>> search([1, 2, 4, 5, 3], 6)
    -1
    """
    right = right or len(list_data) - 1
    if left > right:
        return -1
    elif list_data[left] == key:
        return left
    elif list_data[right] == key:
        return right
    else:
        return search(list_data, key, left + 1, right - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
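
# Added usage sketch. Note the `right or len(list_data) - 1` default: an
# explicit `right=0` is falsy and therefore treated as "unset".
def _demo_search() -> None:
    data = [4, 8, 15, 16, 23, 42]
    assert search(data, 15) == 2
    assert search(data, 99) == -1
    assert search(data, 42, left=0, right=0) == 5  # right=0 falls back to the full range
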
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
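
# Added illustrative subclass (EchoCommand and its wiring are hypothetical, not
# part of transformers): concrete commands register a subparser and implement `run`.
class EchoCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        echo_parser = parser.add_parser("echo", help="Print a message and exit.")
        echo_parser.add_argument("message", type=str)
        echo_parser.set_defaults(func=lambda args: EchoCommand(args.message))

    def __init__(self, message: str):
        self.message = message

    def run(self):
        print(self.message)
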
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__lowerCamelCase :str = 0
__lowerCamelCase :Optional[int] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowerCamelCase :Tuple = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__lowerCamelCase :Tuple = tuple[int, int]
class A__ :
"""simple docstring"""
def __init__( self: int , __a: int , __a: int , __a: int , __a: int , __a: int , __a: Node | None , )-> None:
lowerCamelCase : Optional[int] = pos_x
lowerCamelCase : List[Any] = pos_y
lowerCamelCase : Union[str, Any] = (pos_y, pos_x)
lowerCamelCase : List[Any] = goal_x
lowerCamelCase : Optional[Any] = goal_y
lowerCamelCase : str = g_cost
lowerCamelCase : str = parent
lowerCamelCase : Optional[Any] = self.calculate_heuristic()
lowerCamelCase : List[str] = self.g_cost + self.h_cost
def a__ ( self: List[str] )-> float:
lowerCamelCase : List[str] = self.pos_x - self.goal_x
lowerCamelCase : Optional[Any] = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(__a ) + abs(__a )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self: List[Any] , __a: Node )-> bool:
return self.f_cost < other.f_cost
class A__ :
"""simple docstring"""
def __init__( self: Any , __a: TPosition , __a: TPosition )-> Optional[Any]:
lowerCamelCase : List[Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , __a )
lowerCamelCase : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , 99_999 , __a )
lowerCamelCase : Optional[Any] = [self.start]
lowerCamelCase : list[Node] = []
lowerCamelCase : Tuple = False
def a__ ( self: int )-> list[TPosition]:
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
lowerCamelCase : Tuple = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(__a )
self.closed_nodes.append(__a )
lowerCamelCase : Dict = self.get_successors(__a )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(__a )
else:
# retrieve the best current path
lowerCamelCase : Any = self.open_nodes.pop(self.open_nodes.index(__a ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(__a )
else:
self.open_nodes.append(__a )
return [self.start.pos]
def a__ ( self: Union[str, Any] , __a: Node )-> list[Node]:
lowerCamelCase : str = []
for action in delta:
lowerCamelCase : Union[str, Any] = parent.pos_x + action[1]
lowerCamelCase : Optional[Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__a ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
__a , __a , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , __a , ) )
return successors
def a__ ( self: Union[str, Any] , __a: Node | None )-> list[TPosition]:
lowerCamelCase : Dict = node
lowerCamelCase : Tuple = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
lowerCamelCase : str = current_node.parent
path.reverse()
return path
class A__ :
"""simple docstring"""
def __init__( self: Union[str, Any] , __a: TPosition , __a: TPosition )-> None:
lowerCamelCase : Dict = AStar(__a , __a )
lowerCamelCase : str = AStar(__a , __a )
lowerCamelCase : int = False
def a__ ( self: str )-> list[TPosition]:
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
lowerCamelCase : Tuple = self.fwd_astar.open_nodes.pop(0 )
lowerCamelCase : List[Any] = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
__a , __a )
self.fwd_astar.closed_nodes.append(__a )
self.bwd_astar.closed_nodes.append(__a )
lowerCamelCase : int = current_bwd_node
lowerCamelCase : List[Any] = current_fwd_node
lowerCamelCase : Optional[int] = {
self.fwd_astar: self.fwd_astar.get_successors(__a ),
self.bwd_astar: self.bwd_astar.get_successors(__a ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(__a )
else:
# retrieve the best current path
lowerCamelCase : str = astar.open_nodes.pop(
astar.open_nodes.index(__a ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(__a )
else:
astar.open_nodes.append(__a )
return [self.fwd_astar.start.pos]
def a__ ( self: List[str] , __a: Node , __a: Node )-> list[TPosition]:
lowerCamelCase : str = self.fwd_astar.retrace_path(__a )
lowerCamelCase : Any = self.bwd_astar.retrace_path(__a )
bwd_path.pop()
bwd_path.reverse()
lowerCamelCase : Any = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__lowerCamelCase :Optional[int] = (0, 0)
__lowerCamelCase :Optional[int] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__lowerCamelCase :Optional[int] = time.time()
__lowerCamelCase :Optional[int] = AStar(init, goal)
__lowerCamelCase :int = a_star.search()
__lowerCamelCase :Optional[Any] = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
__lowerCamelCase :List[Any] = time.time()
__lowerCamelCase :List[str] = BidirectionalAStar(init, goal)
__lowerCamelCase :List[str] = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase :Tuple = logging.get_logger(__name__)
__lowerCamelCase :Any = {
'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
# See all GLPN models at https://huggingface.co/models?filter=glpn
}
class A__ ( __lowercase):
"""simple docstring"""
snake_case__ : Tuple ='''glpn'''
def __init__( self: Dict , __a: List[str]=3 , __a: Optional[int]=4 , __a: Dict=[2, 2, 2, 2] , __a: str=[8, 4, 2, 1] , __a: Optional[int]=[32, 64, 160, 256] , __a: Dict=[7, 3, 3, 3] , __a: Dict=[4, 2, 2, 2] , __a: Optional[Any]=[1, 2, 5, 8] , __a: Tuple=[4, 4, 4, 4] , __a: int="gelu" , __a: Union[str, Any]=0.0 , __a: str=0.0 , __a: Union[str, Any]=0.02 , __a: str=0.1 , __a: Union[str, Any]=1e-6 , __a: Any=64 , __a: Dict=10 , __a: Union[str, Any]=-1 , **__a: Optional[Any] , )-> Dict:
super().__init__(**__a )
lowerCamelCase : Dict = num_channels
lowerCamelCase : Any = num_encoder_blocks
lowerCamelCase : Dict = depths
lowerCamelCase : List[str] = sr_ratios
lowerCamelCase : Dict = hidden_sizes
lowerCamelCase : Tuple = patch_sizes
lowerCamelCase : Optional[int] = strides
lowerCamelCase : Optional[Any] = mlp_ratios
lowerCamelCase : Union[str, Any] = num_attention_heads
lowerCamelCase : List[str] = hidden_act
lowerCamelCase : Any = hidden_dropout_prob
lowerCamelCase : Optional[int] = attention_probs_dropout_prob
lowerCamelCase : List[Any] = initializer_range
lowerCamelCase : Dict = drop_path_rate
lowerCamelCase : Any = layer_norm_eps
lowerCamelCase : Optional[Any] = decoder_hidden_size
lowerCamelCase : Tuple = max_depth
lowerCamelCase : Optional[Any] = head_in_index
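
# Added quick check (illustrative values): overriding a field and round-tripping
# the configuration through its dict form preserves it.
def _demo_glpn_config() -> None:
    config = GLPNConfig(decoder_hidden_size=128)
    assert config.hidden_sizes == [32, 64, 160, 256]  # default
    assert GLPNConfig.from_dict(config.to_dict()).decoder_hidden_size == 128
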
"""
Utilities for handling input from the user and registering specific keys to
specific functions, based on https://github.com/bchao1/bullet
"""

from typing import List

from .keymap import KEYMAP, get_character


def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    """
    Metaclass that adds the key handlers to the class.
    """

    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the handler for the pressed character if one exists, otherwise returns None."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the class to the key handler register."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder used in stable unCLIP,
    and scales (normalizes) or unscales (denormalizes) image embeddings accordingly.
    """

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
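
# Added round-trip sketch: with the default zero-mean, unit-std parameters,
# `unscale(scale(x))` recovers the input.
def _demo_image_normalizer() -> None:
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=768)
    embeds = torch.randn(2, 768)
    assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)
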
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
_lowerCamelCase : Tuple = logging.getLogger()
def A__ ( ) ->str:
__A =argparse.ArgumentParser()
parser.add_argument('''-f''' )
__A =parser.parse_args()
return args.f
def A__ ( __A : str ) ->int:
__A ={}
__A =os.path.join(__A , '''all_results.json''' )
if os.path.exists(__A ):
with open(__A , '''r''' ) as f:
__A =json.load(__A )
else:
raise ValueError(F'''can\'t find {path}''' )
return results
def A__ ( ) ->Optional[int]:
__A =torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
_lowerCamelCase : Optional[int] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowerCAmelCase__ ( __magic_name__ ):
'''simple docstring'''
@classmethod
def __UpperCamelCase ( cls ):
'''simple docstring'''
__A =tempfile.mkdtemp()
__A =os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
__A =['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def __UpperCamelCase ( cls ):
'''simple docstring'''
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_auto_remove_tmp_dir()
__A =f'''
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
'''.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
__A =get_results(lowercase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(lowercase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase__ , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_auto_remove_tmp_dir()
__A =f'''
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
'''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
__A =get_results(lowercase__ )
self.assertLess(result['''perplexity'''] , 1_0_0 )
self.assertTrue(os.path.exists(os.path.join(lowercase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase__ , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_auto_remove_tmp_dir()
__A =f'''
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__A =get_results(lowercase__ )
self.assertLess(result['''perplexity'''] , 4_2 )
self.assertTrue(os.path.exists(os.path.join(lowercase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase__ , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =7 if get_gpu_count() > 1 else 2
__A =self.get_auto_remove_tmp_dir()
__A =f'''
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__A =get_results(lowercase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(lowercase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase__ , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_auto_remove_tmp_dir()
__A =f'''
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__A =get_results(lowercase__ )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 2_8 )
self.assertGreaterEqual(result['''eval_exact'''] , 2_8 )
self.assertTrue(os.path.exists(os.path.join(lowercase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase__ , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_auto_remove_tmp_dir()
__A =f'''
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__A =get_results(lowercase__ )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(lowercase__ , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_auto_remove_tmp_dir()
__A =f'''
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__A =get_results(lowercase__ )
self.assertGreaterEqual(result['''eval_rouge1'''] , 1_0 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(lowercase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase__ , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_auto_remove_tmp_dir()
__A =f'''
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
'''.split()
run_command(self._launch_args + testargs )
__A =get_results(lowercase__ )
self.assertGreaterEqual(result['''eval_bleu'''] , 3_0 )
self.assertTrue(os.path.exists(os.path.join(lowercase__ , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase__ , '''translation_no_trainer''' ) ) )
@slow
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =logging.StreamHandler(sys.stdout )
logger.addHandler(lowercase__ )
__A =self.get_auto_remove_tmp_dir()
__A =f'''
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
'''.split()
run_command(self._launch_args + testargs )
__A =get_results(lowercase__ )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __UpperCamelCase ( self ):
'''simple docstring'''
__A =self.get_auto_remove_tmp_dir()
__A =f'''
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
'''.split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
__A =get_results(lowercase__ )
        # The base model scores 25%, so 0.6 verifies that fine-tuning actually helped
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(lowercase__ , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase__ , '''image_classification_no_trainer''' ) ) )
| 702
|
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : str = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowerCamelCase : List[str] = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def A__ ( __A : Optional[int] , __A : Tuple , __A : Union[str, Any] , __A : List[str] , __A : Union[str, Any] ) ->str:
for attribute in key.split('''.''' ):
__A =getattr(__A , __A )
if weight_type is not None:
__A =getattr(__A , __A ).shape
else:
__A =hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
__A =value
elif weight_type == "weight_g":
__A =value
elif weight_type == "weight_v":
__A =value
elif weight_type == "bias":
__A =value
else:
__A =value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def A__ ( __A : int , __A : str ) ->List[str]:
__A =[]
__A =fairseq_model.state_dict()
__A =hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
__A =None
for name, value in fairseq_dict.items():
__A =False
if "conv_layers" in name:
load_conv_layer(
__A , __A , __A , __A , hf_model.config.feat_extract_norm == '''group''' , )
__A =True
elif name.split('''.''' )[0] == "proj":
__A =fairseq_model.proj
__A =True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
__A =True
if "*" in mapped_key:
__A =name.split(__A )[0].split('''.''' )[-2]
__A =mapped_key.replace('''*''' , __A )
if "weight_g" in name:
__A ='''weight_g'''
elif "weight_v" in name:
__A ='''weight_v'''
elif "bias" in name:
__A ='''bias'''
elif "weight" in name:
__A ='''weight'''
else:
__A =None
set_recursively(__A , __A , __A , __A , __A )
continue
if not is_used:
unused_weights.append(__A )
logger.warning(F'''Unused weights: {unused_weights}''' )
return proj_weight
def A__ ( __A : str , __A : List[Any] , __A : Union[str, Any] , __A : Union[str, Any] , __A : str ) ->Optional[Any]:
__A =full_name.split('''conv_layers.''' )[-1]
__A =name.split('''.''' )
__A =int(items[0] )
__A =int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
__A =value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
__A =value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F'''{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
__A =value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
                F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.'''
)
__A =value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__A )
def A__ ( __A : Optional[Any] ) ->List[Any]:
__A , __A =emb.weight.shape
__A =nn.Linear(__A , __A , bias=__A )
__A =emb.weight.data
return lin_layer
def A__ ( __A : Dict ) ->Optional[int]:
with open(__A , '''r''' , encoding='''utf-8''' ) as f:
__A =f.readlines()
__A =[line.split(''' ''' )[0] for line in lines]
__A =len(__A )
__A ={
'''<s>''': 0,
'''<pad>''': 1,
'''</s>''': 2,
'''<unk>''': 3,
}
vocab_dict.update(dict(zip(__A , range(4 , num_words + 4 ) ) ) )
return vocab_dict
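# A minimal sketch (the dict file contents are hypothetical, not from the
# source) of what create_vocab_dict produces: the four special tokens take
# ids 0-3 and dictionary entries are appended from id 4 onwards.
#
#   dict file:  "hello 42\nworld 7\n"
#   result:     {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3,
#                "hello": 4, "world": 5}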
@torch.no_grad()
def A__ ( __A : List[Any] , __A : Optional[Any] , __A : Tuple , __A : int , __A : str , __A : str , __A : Dict , ) ->Tuple:
__A =WavaVecaConfig.from_pretrained(__A )
__A =SpeechaTextaConfig.from_pretrained(
__A , vocab_size=__A , decoder_layers=__A , do_stable_layer_norm=__A )
__A =WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__A , return_attention_mask=__A , )
__A , __A , __A =fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
__A =model[0].eval()
# set weights for wav2vec2 encoder
__A =WavaVecaModel(__A )
__A =recursively_load_weights_wavaveca(model.encoder , __A )
__A =SpeechaTextaForCausalLM(__A )
__A , __A =hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__A )
# set output linear layer
unexpected_keys.remove('''embed_out''' )
__A =nn.Parameter(model.decoder.embed_out.detach() )
    # the layer norm is initialized to the identity matrix, so leaving it untouched is fine
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
__A =SpeechEncoderDecoderModel(encoder=__A , decoder=__A )
__A =False
# add projection layer
__A =nn.Parameter(projection_layer.weight )
__A =nn.Parameter(projection_layer.bias )
__A =create_vocab_dict(__A )
with open(os.path.join(__A , '''vocab.json''' ) , '''w''' ) as fp:
json.dump(__A , __A )
__A =SpeechaTextaTokenizer(os.path.join(__A , '''vocab.json''' ) )
tokenizer.save_pretrained(__A )
__A =hf_wavavec.config.to_dict()
__A =tokenizer.pad_token_id
__A =tokenizer.bos_token_id
__A =tokenizer.eos_token_id
__A ='''speech_to_text_2'''
__A ='''wav2vec2'''
__A =SpeechEncoderDecoderConfig.from_dict(__A )
hf_wavavec.save_pretrained(__A )
feature_extractor.save_pretrained(__A )
if __name__ == "__main__":
_lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=1_0224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
_lowerCamelCase : Tuple = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
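# A hedged example invocation of the conversion above (all paths are placeholders):
#
#   python <this_script>.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict.txt \
#       --pytorch_dump_folder_path ./wav2vec2-s2t-converted
#
# The remaining flags fall back to the parser defaults above (the encoder and
# decoder config paths, --vocab_size 10224 and --num_decoder_layers 7).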
| 516
| 0
|
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Searches Google Images for ``query`` and downloads up to ``max_images``
    full-resolution results into a ``query_<term>`` folder, returning the
    number of images saved."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script"))))
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    index = 0  # guard against an empty result list
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        # the URLs are double-escaped in the page source, so decode twice
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape")
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape")
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg")
    return index


if __name__ == "__main__":
    try:
        image_count = download_images_from_google_query(sys.argv[1])
        print(f"{image_count} images were downloaded to disk.")
    except IndexError:
        print("Please provide a search term.")
        raise
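# A minimal usage sketch (the query is hypothetical):
#
#   count = download_images_from_google_query("potato", max_images=3)
#   print(count)
#
# The scraping regexes above are tied to Google's current result markup and
# may return 0 if the page layout changes.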
| 3
|
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
def __magic_name__( self , __UpperCAmelCase ):
lowerCAmelCase__ : Union[str, Any] = 3
lowerCAmelCase__ : Tuple = 250
lowerCAmelCase__ : List[Any] = ids_tensor((batch_size, length) , __UpperCAmelCase )
lowerCAmelCase__ : Optional[Any] = torch.ones((batch_size, length) , device=__UpperCAmelCase , dtype=torch.float ) / length
return input_ids, scores
def __magic_name__( self ):
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self._get_tensors(5 )
lowerCAmelCase__ : List[str] = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self._get_tensors(9 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self._get_tensors(10 )
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
lowerCAmelCase__ : Tuple = MaxLengthCriteria(max_length=10 )
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self._get_tensors(5 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self._get_tensors(9 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(10 )
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
lowerCAmelCase__ : Optional[int] = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(5 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self._get_tensors(9 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self._get_tensors(10 )
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ : int = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def __magic_name__( self ):
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self._get_tensors(5 )
lowerCAmelCase__ : Any = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ : int = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(__UpperCAmelCase , __UpperCAmelCase ) )
def __magic_name__( self ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(__UpperCAmelCase ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
lowerCAmelCase__ : List[str] = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(__UpperCAmelCase ) , 1 )
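# A minimal usage sketch (not part of the test suite) of how a generation loop
# consumes the criteria exercised above:
#
#   criteria = StoppingCriteriaList(
#       [MaxLengthCriteria(max_length=10), MaxTimeCriteria(max_time=0.1)]
#   )
#   while not criteria(input_ids, scores):
#       ...  # append the next token to input_ids
#
# The list reports True as soon as any single criterion is met.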
| 678
| 0
|
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str) -> None:
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of char in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the last mismatch for the window starting
        at current_pos, or -1 if the whole window matches the pattern."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
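# A short worked trace of the bad character heuristic for the demo above
# (text "ABAABA", pattern "AB"):
#
#   window at 0: "AB" matches                       -> record position 0
#   window at 1: "BA" mismatches at text index 2
#   window at 2: "AA" mismatches at text index 3
#   window at 3: "AB" matches                       -> record position 3
#   window at 4: "BA" mismatches at text index 5
#
# so the script prints [0, 3].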
| 647
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCamelCase : Optional[int] = logging.getLogger(__name__)
_lowerCamelCase : Optional[int] = '''Hello world! cécé herlolip'''
_lowerCamelCase : List[Any] = namedtuple(
'''BertAbsConfig''',
[
'''temp_dir''',
'''large''',
'''use_bert_emb''',
'''finetune_bert''',
'''encoder''',
'''share_emb''',
'''max_pos''',
'''enc_layers''',
'''enc_hidden_size''',
'''enc_heads''',
'''enc_ff_size''',
'''enc_dropout''',
'''dec_layers''',
'''dec_hidden_size''',
'''dec_heads''',
'''dec_ff_size''',
'''dec_dropout''',
],
)
def __lowerCamelCase (UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[int] ):
SCREAMING_SNAKE_CASE = BertAbsConfig(
temp_dir="." , finetune_bert=UpperCAmelCase__ , large=UpperCAmelCase__ , share_emb=UpperCAmelCase__ , use_bert_emb=UpperCAmelCase__ , encoder="bert" , max_pos=5_1_2 , enc_layers=6 , enc_hidden_size=5_1_2 , enc_heads=8 , enc_ff_size=5_1_2 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_6_8 , dec_heads=8 , dec_ff_size=2_0_4_8 , dec_dropout=0.2 , )
SCREAMING_SNAKE_CASE = torch.load(UpperCAmelCase__ , lambda UpperCAmelCase__ , UpperCAmelCase__ : storage )
SCREAMING_SNAKE_CASE = AbsSummarizer(UpperCAmelCase__ , torch.device("cpu" ) , UpperCAmelCase__ )
original.eval()
SCREAMING_SNAKE_CASE = BertAbsSummarizer(UpperCAmelCase__ , torch.device("cpu" ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info("convert the model" )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info("Make sure that the models' outputs are identical" )
SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("bert-base-uncased" )
# prepare the model inputs
SCREAMING_SNAKE_CASE = tokenizer.encode("This is sample éàalj'-." )
encoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(UpperCAmelCase__ )) )
SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ).unsqueeze(0 )
SCREAMING_SNAKE_CASE = tokenizer.encode("This is sample 3 éàalj'-." )
decoder_input_ids.extend([tokenizer.pad_token_id] * (5_1_2 - len(UpperCAmelCase__ )) )
SCREAMING_SNAKE_CASE = torch.tensor(UpperCAmelCase__ ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
SCREAMING_SNAKE_CASE = encoder_input_ids
SCREAMING_SNAKE_CASE = decoder_input_ids
SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
SCREAMING_SNAKE_CASE = original(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )[0]
SCREAMING_SNAKE_CASE = original.generator(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = new_model(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )[0]
SCREAMING_SNAKE_CASE = new_model.generator(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE = torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
print("Maximum absolute difference beween weights: {:.2f}".format(UpperCAmelCase__ ) )
SCREAMING_SNAKE_CASE = torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1e-3 )
if are_identical:
logging.info("all weights are equal up to 1e-3" )
else:
raise ValueError("the weights are different. The new model is likely different from the original one." )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info("saving the model's state dictionary" )
torch.save(
new_model.state_dict() , "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin" )
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument(
'''--bertabs_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the output PyTorch model.''',
)
_lowerCamelCase : Any = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
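# A hedged example invocation of the script above (both paths are placeholders):
#
#   python <this_script>.py \
#       --bertabs_checkpoint_path /path/to/bertabs_checkpoint.pt \
#       --pytorch_dump_folder_path ./bertabs-converted
#
# Note that convert_bertabs_checkpoints itself writes the state dict to the
# hard-coded ./bertabs-finetuned-cnndm-extractive-abstractive-summarization/
# path rather than to --pytorch_dump_folder_path.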
| 647
| 1
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for the input sample x[n]."""
        return 0.0


def get_bounds(
    fft_results: np.ndarray, samplerate: int
) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the frequency response of a filter by feeding it a unit impulse."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)
    # Frequencies on log scale from 24 Hz to the Nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")
    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the phase response of a filter by feeding it a unit impulse."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]
    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.angle(np.fft.fft(outputs))
    # Frequencies on log scale from 24 Hz to the Nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")
    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
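# A minimal sketch of a concrete filter satisfying the FilterType protocol
# above. It is an assumption for illustration (a one-pole low-pass), not a
# filter shipped with this module; the smoothing coefficient is arbitrary.
class SimpleLowPass:
    def __init__(self, alpha: float = 0.1) -> None:
        self.alpha = alpha
        self.prev = 0.0

    def process(self, sample: float) -> float:
        # y[n] = y[n-1] + alpha * (x[n] - y[n-1])
        self.prev += self.alpha * (sample - self.prev)
        return self.prev


# Example: show_frequency_response(SimpleLowPass(), 48000)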
| 43
|
"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
A__ : Any = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
A__ : Tuple = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: Any = calculate_rouge(_UpperCamelCase , _UpperCamelCase , bootstrap_aggregation=_UpperCamelCase , rouge_keys=['''rouge2''', '''rougeL'''] )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
_lowercase: List[Any] = calculate_rouge(_UpperCamelCase , _UpperCamelCase , bootstrap_aggregation=_UpperCamelCase , rouge_keys=['''rouge2'''] )
assert (
pd.DataFrame(no_aggregation['''rouge2'''] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra['''rouge2'''] ).fmeasure.mean()
)
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: Dict = '''rougeLsum'''
_lowercase: Dict = calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase , rouge_keys=[k] )[k]
_lowercase: List[str] = calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase , rouge_keys=[k] )[k]
assert score > score_no_sep
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: Tuple = ['''rouge1''', '''rouge2''', '''rougeL''']
_lowercase: Dict = calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase , rouge_keys=_UpperCamelCase )
_lowercase: Optional[int] = calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase , rouge_keys=_UpperCamelCase )
assert score_sep == score_no_sep
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: Union[str, Any] = [
'''Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .''',
]
_lowercase: Union[str, Any] = [
'''Margot Frank, died in 1945, a month earlier than previously thought.''',
'''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'''
''' the final seconds on board Flight 9525.''',
]
assert calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase ) == calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase )
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: int = [
'''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
]
_lowercase: Union[str, Any] = [
''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
]
_lowercase: List[Any] = calculate_rouge(_UpperCamelCase , _UpperCamelCase , rouge_keys=['''rougeLsum'''] , newline_sep=_UpperCamelCase )['''rougeLsum''']
_lowercase: Union[str, Any] = calculate_rouge(_UpperCamelCase , _UpperCamelCase , rouge_keys=['''rougeLsum'''] )['''rougeLsum''']
assert new_score > prev_score
def _lowerCAmelCase ( ):
"""simple docstring"""
_lowercase: List[str] = Path('''examples/seq2seq/test_data/wmt_en_ro''' )
_lowercase: int = calculate_rouge_path(data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
_lowercase: Optional[int] = calculate_rouge_path(
data_dir.joinpath('''test.source''' ) , data_dir.joinpath('''test.target''' ) , bootstrap_aggregation=_UpperCamelCase )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
| 353
| 0
|
"""simple docstring"""
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Determine whether ``number`` is prime."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yield the primes in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all the primes below ``n``."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(F"""{solution() = }""")
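# A quick sanity check (not part of the original script): the primes below 10
# are 2, 3, 5 and 7, so solution(10) should return 17.
#
#   assert solution(10) == 17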
| 598
|
"""simple docstring"""
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __snake_case ( unittest.TestCase):
@parameterized.expand([(None,), ('''foo.json''',)] )
def SCREAMING_SNAKE_CASE ( self : Dict , __lowerCAmelCase : Tuple ):
"""simple docstring"""
_lowerCamelCase : Dict = GenerationConfig(
do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase , config_name=__lowerCAmelCase )
_lowerCamelCase : int = GenerationConfig.from_pretrained(__lowerCAmelCase , config_name=__lowerCAmelCase )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , __lowerCAmelCase )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 5_0 )
self.assertEqual(loaded_config.max_length , 2_0 )
self.assertEqual(loaded_config.max_time , __lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Dict = AutoConfig.from_pretrained('''gpt2''' )
_lowerCamelCase : List[Any] = GenerationConfig.from_model_config(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(__lowerCAmelCase , __lowerCAmelCase )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Optional[int] = GenerationConfig()
_lowerCamelCase : Any = {
'''max_new_tokens''': 1_0_2_4,
'''foo''': '''bar''',
}
_lowerCamelCase : Optional[Any] = copy.deepcopy(__lowerCAmelCase )
_lowerCamelCase : List[str] = generation_config.update(**__lowerCAmelCase )
# update_kwargs was not modified (no side effects)
self.assertEqual(__lowerCAmelCase , __lowerCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_0_2_4 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(__lowerCAmelCase , {'''foo''': '''bar'''} )
def SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
_lowerCamelCase : int = GenerationConfig()
_lowerCamelCase : str = '''bar'''
with tempfile.TemporaryDirectory('''test-generation-config''' ) as tmp_dir:
generation_config.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : Tuple = GenerationConfig.from_pretrained(__lowerCAmelCase )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , '''bar''' )
_lowerCamelCase : Any = GenerationConfig.from_model_config(__lowerCAmelCase )
assert not hasattr(__lowerCAmelCase , '''foo''' ) # no new kwargs should be initialized if from config
def SCREAMING_SNAKE_CASE ( self : List[str] ):
"""simple docstring"""
_lowerCamelCase : Any = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , __lowerCAmelCase )
self.assertEqual(default_config.num_beams , 1 )
_lowerCamelCase : int = GenerationConfig(
do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , __lowerCAmelCase )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__lowerCAmelCase )
_lowerCamelCase : Tuple = GenerationConfig.from_pretrained(__lowerCAmelCase , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , __lowerCAmelCase )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __snake_case ( unittest.TestCase):
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Any ):
"""simple docstring"""
_lowerCamelCase : Dict = TOKEN
HfFolder.save_token(__lowerCAmelCase )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[Any] ):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='''test-generation-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-generation-config-org''' )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
_lowerCamelCase : int = GenerationConfig(
do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''test-generation-config''' , use_auth_token=self._token )
_lowerCamelCase : Tuple = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''test-generation-config''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCAmelCase , repo_id='''test-generation-config''' , push_to_hub=__lowerCAmelCase , use_auth_token=self._token )
_lowerCamelCase : Optional[int] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
_lowerCamelCase : Tuple = GenerationConfig(
do_sample=__lowerCAmelCase , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('''valid_org/test-generation-config-org''' , use_auth_token=self._token )
_lowerCamelCase : Optional[int] = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id='''valid_org/test-generation-config-org''' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
__lowerCAmelCase , repo_id='''valid_org/test-generation-config-org''' , push_to_hub=__lowerCAmelCase , use_auth_token=self._token )
_lowerCamelCase : str = GenerationConfig.from_pretrained('''valid_org/test-generation-config-org''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(__lowerCAmelCase , getattr(__lowerCAmelCase , __lowerCAmelCase ) )
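# A minimal usage sketch (outside the test harness) of the round-trip the
# tests above exercise; the directory is a placeholder:
#
#   config = GenerationConfig(do_sample=True, temperature=0.7)
#   config.save_pretrained("/tmp/gen-config")
#   reloaded = GenerationConfig.from_pretrained("/tmp/gen-config")
#
# Unspecified fields keep the library defaults (e.g. top_k=50, max_length=20,
# as asserted in the first test).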
| 598
| 1
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__A : Tuple = logging.get_logger(__name__)
__A : Optional[Any] = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
__A : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
UpperCAmelCase = model_type_to_module_name(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = importlib.import_module(F""".{module_name}""" , '''transformers.models''' )
try:
return getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(SCREAMING_SNAKE_CASE_ , '''__name__''' , SCREAMING_SNAKE_CASE_ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
UpperCAmelCase = importlib.import_module('''transformers''' )
if hasattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return None
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = False , UpperCamelCase__ = False , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = False , **UpperCamelCase__ , ) -> Tuple:
'''simple docstring'''
UpperCAmelCase = get_file_from_repo(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , force_download=SCREAMING_SNAKE_CASE_ , resume_download=SCREAMING_SNAKE_CASE_ , proxies=SCREAMING_SNAKE_CASE_ , use_auth_token=SCREAMING_SNAKE_CASE_ , revision=SCREAMING_SNAKE_CASE_ , local_files_only=SCREAMING_SNAKE_CASE_ , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as reader:
return json.load(SCREAMING_SNAKE_CASE_ )
class A_ :
def __init__( self ):
'''simple docstring'''
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(_UpperCamelCase )
def _lowercase ( cls , _A , **_A ):
'''simple docstring'''
UpperCAmelCase = kwargs.pop('''config''' , _UpperCamelCase )
UpperCAmelCase = kwargs.pop('''trust_remote_code''' , _UpperCamelCase )
UpperCAmelCase = True
UpperCAmelCase , UpperCAmelCase = ImageProcessingMixin.get_image_processor_dict(_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase = config_dict.get('''image_processor_type''' , _UpperCamelCase )
UpperCAmelCase = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
UpperCAmelCase = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
UpperCAmelCase = config_dict.pop('''feature_extractor_type''' , _UpperCamelCase )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
UpperCAmelCase = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
UpperCAmelCase = config_dict['''auto_map''']['''AutoFeatureExtractor''']
UpperCAmelCase = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase = AutoConfig.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
# It could be in `config.image_processor_type``
UpperCAmelCase = getattr(_UpperCamelCase , '''image_processor_type''' , _UpperCamelCase )
if hasattr(_UpperCamelCase , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
UpperCAmelCase = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
UpperCAmelCase = image_processor_class_from_name(_UpperCamelCase )
UpperCAmelCase = image_processor_auto_map is not None
UpperCAmelCase = image_processor_class is not None or type(_UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING
UpperCAmelCase = resolve_trust_remote_code(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if has_remote_code and trust_remote_code:
UpperCAmelCase = get_class_from_dynamic_module(
_UpperCamelCase , _UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase = kwargs.pop('''code_revision''' , _UpperCamelCase )
if os.path.isdir(_UpperCamelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_UpperCamelCase , **_UpperCamelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(_UpperCamelCase , **_UpperCamelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING:
UpperCAmelCase = IMAGE_PROCESSOR_MAPPING[type(_UpperCamelCase )]
return image_processor_class.from_dict(_UpperCamelCase , **_UpperCamelCase )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def _lowercase ( _A , _A ):
'''simple docstring'''
IMAGE_PROCESSOR_MAPPING.register(_UpperCamelCase , _UpperCamelCase )
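# A minimal usage sketch of the class above (the checkpoint name is only an
# illustration, not taken from this file):
#
#   from transformers import AutoImageProcessor
#
#   processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
#   inputs = processor(images=image, return_tensors="pt")
#
# Resolution follows the order implemented above: the image processor config,
# then a legacy feature extractor config, then the model config's mapping entry.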
| 130
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCamelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase=13 , _UpperCamelCase=7 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=24 , _UpperCamelCase=2 , _UpperCamelCase=6 , _UpperCamelCase=37 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.02 , _UpperCamelCase=3 , _UpperCamelCase=None , _UpperCamelCase=1000 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = scope
_UpperCAmelCase = range_bbox
def UpperCamelCase( self ):
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
_UpperCAmelCase = bbox[i, j, 3]
_UpperCAmelCase = bbox[i, j, 1]
_UpperCAmelCase = t
if bbox[i, j, 2] < bbox[i, j, 0]:
_UpperCAmelCase = bbox[i, j, 2]
_UpperCAmelCase = bbox[i, j, 0]
_UpperCAmelCase = t
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCamelCase( self ):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = LiltModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase , token_type_ids=_UpperCamelCase )
_UpperCAmelCase = model(_UpperCamelCase , bbox=_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = LiltForTokenClassification(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
_UpperCAmelCase = LiltForQuestionAnswering(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
_UpperCamelCase , bbox=_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase( self ):
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) , (
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __UpperCamelCase ( A__ , A__ , A__ , unittest.TestCase ):
__A : Dict = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__A : Optional[Any] = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__A : List[Any] = False
__A : Optional[int] = False
def UpperCamelCase( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return True
def UpperCamelCase( self ):
_UpperCAmelCase = LiltModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 )
def UpperCamelCase( self ):
self.config_tester.run_common_tests()
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
def UpperCamelCase( self ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )
@slow
def UpperCamelCase( self ):
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = LiltModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@require_torch
@slow
class __UpperCamelCase ( unittest.TestCase ):
def UpperCamelCase( self ):
_UpperCAmelCase = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(_UpperCamelCase )
_UpperCAmelCase = torch.tensor([[1, 2]] , device=_UpperCamelCase )
_UpperCAmelCase = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(input_ids=_UpperCamelCase , bbox=_UpperCamelCase )
_UpperCAmelCase = torch.Size([1, 2, 768] )
_UpperCAmelCase = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=_UpperCamelCase , )
self.assertTrue(outputs.last_hidden_state.shape , _UpperCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _UpperCamelCase , atol=1e-3 ) )
from ..utils import DummyObject, requires_backends


# NOTE: the public class names below follow the corresponding diffusers
# dummy-objects module; they were not recoverable from this dump, so treat
# the specific names as assumptions.
class OnnxStableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class OnnxStableDiffusionUpscalePipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])


class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "onnx"])
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def next_number(number: int) -> int:
    """Returns the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared
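# Worked example (added for illustration): next_number(44) = 4**2 + 4**2 = 32,
# and iterating gives 44 -> 32 -> 13 -> 10 -> 1, one of the two possible chain
# endings; the other ending is 89 (e.g. 85 -> 8**2 + 5**2 = 89).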
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends in 1
CHAINS[57] = False  # the chain starting at 58 ends in 89
def chain(number: int) -> bool:
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    # Appending a digit 0 contributes nothing to the digit-square sum, so every
    # multiple of 10 of this number shares the same chain result.
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain
def solution(number: int = 10_000_000) -> int:
    """Counts how many starting numbers below ``number`` produce a chain ending in 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'{solution() = }')
'''simple docstring'''
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
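# Expected result shape (illustrative, not verified output): a list of
# (filled_sentence, score, token) tuples, one per top-k prediction, e.g.
# ("Le camembert est <prediction> :)", <score>, "<prediction>").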
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    """Configuration class for a timm backbone wrapped as a transformers backbone."""

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
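# Minimal usage sketch (added; the backbone name is an assumption -- any timm
# model id works):
#   config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
# The config is then consumed by the TimmBackbone model class defined elsewhere.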
'''simple docstring'''
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)

        if "kernel" in name:
            array = array.transpose()

        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.query.weight.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape
        )
        self_attn.query.bias.data = get_encoder_attention_layer_array(
            layer_index, "_query_dense/bias", self_attn.query.bias.data.shape
        )
        self_attn.key.weight.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape
        )
        self_attn.key.bias.data = get_encoder_attention_layer_array(
            layer_index, "_key_dense/bias", self_attn.key.bias.data.shape
        )
        self_attn.value.weight.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape
        )
        self_attn.value.bias.data = get_encoder_attention_layer_array(
            layer_index, "_value_dense/bias", self_attn.value.bias.data.shape
        )

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.weight.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape
        )
        self_output.dense.bias.data = get_encoder_attention_layer_array(
            layer_index, "_output_dense/bias", self_output.dense.bias.data.shape
        )

        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output

        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")

        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform

    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")

    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")

    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow Token Dropping checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model.""",
)
    args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
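    # Example invocation (added; script name and paths are illustrative):
    #   python convert_checkpoint.py \
    #       --tf_checkpoint_path ./token_dropping_ckpt \
    #       --bert_config_file ./bert_config.json \
    #       --pytorch_dump_path ./pytorch_model_out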
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
UpperCamelCase : int = """\
@inproceedings{kakwani2020indicnlpsuite,
title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},
author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},
year={2020},
booktitle={Findings of EMNLP},
}
"""
UpperCamelCase : List[str] = """\
IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide
variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.
"""
UpperCamelCase : Tuple = """
Compute IndicGLUE evaluation metric associated to each IndicGLUE dataset.
Args:
predictions: list of predictions to score (as int64),
except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).
references: list of ground truth labels corresponding to the predictions (as int64),
except for 'cvit-mkb-clsr' where each reference is a vector (of float32).
Returns: depending on the IndicGLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"precision\": Precision@10
Examples:
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')
>>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]
>>> results = indic_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'precision@10': 1.0}
"""
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(en_sentvecs, in_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
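# Illustration (added): if en_sentvecs == in_sentvecs, each row's nearest
# neighbour under cosine distance is itself, so `matches` is all True and
# precision_at_10 returns 1.0 -- matching the cvit-mkb-clsr docstring example.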
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
"""simple docstring"""
    def _info(self):
"""simple docstring"""
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
'references': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
}) , codebase_urls=[] , reference_urls=[] , format='numpy' if self.config_name != 'cvit-mkb-clsr' else None , )
    def _compute(self, predictions, references):
        """simple docstring"""
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(UpperCAmelCase_ , UpperCAmelCase_)}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
'''simple docstring'''
import os
from collections.abc import Iterator
def __UpperCAmelCase ( A : str = "." ) -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(A ):
UpperCAmelCase_ : List[str] = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(A )[1] in (".py", ".ipynb"):
yield os.path.join(A , A ).lstrip('''./''' )
def __UpperCAmelCase ( A : List[Any] ) -> Tuple:
return F"{i * ' '}*" if i else "\n##"
def __UpperCAmelCase ( A : str , A : str ) -> str:
UpperCAmelCase_ : Dict = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(A ) or old_parts[i] != new_part) and new_part:
print(F"{md_prefix(A )} {new_part.replace('_' , ' ' ).title()}" )
return new_path
def __UpperCAmelCase ( A : str = "." ) -> None:
UpperCAmelCase_ : Dict = ''''''
for filepath in sorted(good_file_paths(A ) ):
UpperCAmelCase_ , UpperCAmelCase_ : Any = os.path.split(A )
if filepath != old_path:
UpperCAmelCase_ : Optional[Any] = print_path(A , A )
UpperCAmelCase_ : int = (filepath.count(os.sep ) + 1) if filepath else 0
UpperCAmelCase_ : Any = F"{filepath}/{filename}".replace(''' ''' , '''%20''' )
UpperCAmelCase_ : List[str] = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
print(F"{md_prefix(A )} [{filename}]({url})" )
if __name__ == "__main__":
print_directory_md('.')
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Maximum number of self-training (pseudo-labeling) iterations."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
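# Illustration (added): with eval_result = 0.9 and 1,000 inference rows,
# do_filter_by_val_performance keeps the 900 most confident predictions as
# pseudo-labels for the next self-training iteration.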
def selftrain(model_name_or_path: str, train_file: str, infer_file: str, output_dir: str, **kwargs):
    """Self-training loop: fine-tune, pseudo-label, and repeat."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)
    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)
    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{best_iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
"""simple docstring"""
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self):
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
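# Usage sketch (added): Translation(languages=["en", "fr"]) encodes examples such
# as {"en": "the cat", "fr": "le chat"}; TranslationVariableLanguages also accepts
# per-example language subsets and lists of alternative translations, which
# encode_example() flattens into parallel "language"/"translation" lists.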
"""simple docstring"""
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks whether a matrix equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient v* A v / (v* v) for a Hermitian matrix A."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)
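# Worked check (added for illustration): for the identity matrix A = np.eye(3),
# v* A v = v* v, so the Rayleigh quotient equals 1 for every nonzero vector v.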
if __name__ == "__main__":
import doctest
doctest.testmod()
tests()
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}


class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with a Jieba segmentation tool; used in CPM models."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
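# Usage sketch (added): the model id below appears in PRETRAINED_VOCAB_FILES_MAP
# above; jieba-based pre-segmentation is configured in __init__.
#   tokenizer = CpmTokenizer.from_pretrained("TsinghuaAI/CPM-Generate")
#   ids = tokenizer.encode("今天天气真好")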
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    """A binary-tree node with a value and optional left/right children."""

    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[Any]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[Any]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[Any]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> list[Any]:
    output: list[Any] = []
    if root is None:
        return output

    process_queue = deque([root])

    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)

        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> list[Any]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> list[Any]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> list[Any]:
    """Zigzag traversal: alternate the direction of traversal on each level."""
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []

    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0

    return output


def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")

    print(f"Height of Tree: {height(tree)}", "\n")

    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")

    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))

    print("\nZigZag order Traversal: ")
    print(zigzag(tree))
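# For the sample tree built by make_tree(), zigzag() returns [[1], [3, 2], [4, 5]]:
# level 1 left-to-right, level 2 right-to-left, level 3 left-to-right again.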
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2


class PythonInterpreterTester(unittest.TestCase):
    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : str ) -> List[str]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[Any] ) -> List[str]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Union[str, Any] ) -> Optional[int]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Any ) -> Any:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : int ) -> List[Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
@slow
def _lowercase ( self : List[Any] ) -> Optional[Any]:
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = DistilBertModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@slow
@require_torch_gpu
def _lowercase ( self : int ) -> Any:
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
lowercase_ = True
lowercase_ = model_class(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.jit.trace(
SCREAMING_SNAKE_CASE_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , '''traced_model.pt''' ) )
lowercase_ = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE_ , '''traced_model.pt''' ) , map_location=SCREAMING_SNAKE_CASE_ )
loaded(inputs_dict['''input_ids'''].to(SCREAMING_SNAKE_CASE_ ) , inputs_dict['''attention_mask'''].to(SCREAMING_SNAKE_CASE_ ) )
@require_torch
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
lowercase_ = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
lowercase_ = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowercase_ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowercase_ = torch.tensor(
[[[-0.16_39, 0.32_99, 0.16_48], [-0.17_46, 0.32_89, 0.17_10], [-0.18_84, 0.33_57, 0.18_10]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
'''simple docstring'''
from collections.abc import Iterable
from typing import Any
class _lowerCAmelCase :
'''simple docstring'''
def __init__(self , UpperCAmelCase = None ) -> List[str]:
_snake_case = value
_snake_case = None # Added in order to delete a node easier
_snake_case = None
_snake_case = None
def __repr__(self ) -> int:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 )
class _lowerCAmelCase :
'''simple docstring'''
def __init__(self , UpperCAmelCase = None ) -> Optional[int]:
_snake_case = root
def __str__(self ) -> Union[str, Any]:
return str(self.root )
def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]:
        if new_children is not None: # reset its children
            _snake_case = node.parent
        if node.parent is not None: # reset its parent
            if self.is_right(__lowercase ): # if it is the right child
_snake_case = new_children
else:
_snake_case = new_children
else:
_snake_case = new_children
def lowercase (self , UpperCAmelCase ) -> Dict:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def lowercase (self ) -> Tuple:
return self.root is None
def lowercase (self , UpperCAmelCase ) -> Union[str, Any]:
_snake_case = Node(__lowercase ) # create a new Node
if self.empty(): # if Tree is empty
_snake_case = new_node # set its root
else: # Tree is not empty
_snake_case = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
_snake_case = new_node # We insert the new node in a leaf
break
else:
_snake_case = parent_node.left
else:
if parent_node.right is None:
_snake_case = new_node
break
else:
_snake_case = parent_node.right
_snake_case = parent_node
def lowercase (self , *UpperCAmelCase ) -> List[str]:
for value in values:
self.__insert(__lowercase )
def lowercase (self , UpperCAmelCase ) -> Dict:
if self.empty():
            raise IndexError("""Warning: Tree is empty! Please insert some values first.""" )
else:
_snake_case = self.root
# use lazy evaluation here to avoid NoneType Attribute error
            while node is not None and node.value != value:
_snake_case = node.left if value < node.value else node.right
return node
def lowercase (self , UpperCAmelCase = None ) -> Optional[int]:
if node is None:
if self.root is None:
return None
_snake_case = self.root
if not self.empty():
while node.right is not None:
_snake_case = node.right
return node
def lowercase (self , UpperCAmelCase = None ) -> Optional[int]:
if node is None:
_snake_case = self.root
if self.root is None:
return None
if not self.empty():
_snake_case = self.root
while node.left is not None:
_snake_case = node.left
return node
def lowercase (self , UpperCAmelCase ) -> str:
_snake_case = self.search(__lowercase ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(__lowercase , __lowercase )
elif node.left is None: # Has only right children
self.__reassign_nodes(__lowercase , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(__lowercase , node.left )
else:
_snake_case = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
_snake_case = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def lowercase (self , UpperCAmelCase ) -> Union[str, Any]:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def lowercase (self , UpperCAmelCase=None ) -> Optional[int]:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> Tuple:
if node:
self.inorder(__lowercase , node.left )
arr.append(node.value )
self.inorder(__lowercase , node.right )
def lowercase (self , UpperCAmelCase , UpperCAmelCase ) -> str:
_snake_case = []
self.inorder(__lowercase , __lowercase ) # append all values to list using inorder traversal
return arr[k - 1]
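    # Note (added for clarity): an inorder traversal of a binary search tree
    # visits values in ascending order, so arr[k - 1] above is the k-th
    # smallest value stored in the tree.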
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE ):
_snake_case = []
if curr_node is not None:
_snake_case = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def __SCREAMING_SNAKE_CASE ( ):
_snake_case = (8, 3, 6, 1, 10, 14, 13, 4, 7)
_snake_case = BinarySearchTree()
for i in testlist:
t.insert(UpperCAmelCase_ )
# Prints all the elements of the list in order traversal
print(UpperCAmelCase_ )
if t.search(6 ) is not None:
print("""The value 6 exists""" )
else:
print("""The value 6 doesn\'t exist""" )
if t.search(-1 ) is not None:
print("""The value -1 exists""" )
else:
print("""The value -1 doesn\'t exist""" )
if not t.empty():
print("""Max Value: """ , t.get_max().value ) # type: ignore
print("""Min Value: """ , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(UpperCAmelCase_ )
print(UpperCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
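# --- Illustrative sketch (not part of the file above) -------------------------
# A minimal, standalone restatement of the postorder idea used by the
# `postorder` helper above: recurse into the left child, then the right child,
# then visit the node itself. `SimpleNode` and `simple_postorder` are
# hypothetical names that exist only for this example.
class SimpleNode:
    def __init__(self, value, left=None, right=None):
        self.value = value
        self.left = left
        self.right = right
def simple_postorder(node):
    if node is None:
        return []
    return simple_postorder(node.left) + simple_postorder(node.right) + [node.value]
# simple_postorder(SimpleNode(2, SimpleNode(1), SimpleNode(3))) == [1, 3, 2]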
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class snake_case :
'''simple docstring'''
def __init__( self : int , __lowercase : Dict , __lowercase : int=13 , __lowercase : str=7 , __lowercase : List[str]=True , __lowercase : Union[str, Any]=True , __lowercase : List[Any]=True , __lowercase : Optional[int]=True , __lowercase : Dict=99 , __lowercase : int=64 , __lowercase : Dict=32 , __lowercase : Optional[Any]=5 , __lowercase : Tuple=4 , __lowercase : Optional[Any]=37 , __lowercase : Dict="gelu" , __lowercase : int=0.1 , __lowercase : Optional[int]=0.1 , __lowercase : Tuple=512 , __lowercase : List[str]=16 , __lowercase : Dict=2 , __lowercase : int=0.0_2 , __lowercase : Dict=3 , __lowercase : List[str]=4 , __lowercase : Optional[int]=None , ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = parent
__UpperCAmelCase : Tuple = batch_size
__UpperCAmelCase : Any = seq_length
__UpperCAmelCase : Dict = is_training
__UpperCAmelCase : List[str] = use_input_mask
__UpperCAmelCase : Optional[Any] = use_token_type_ids
__UpperCAmelCase : int = use_labels
__UpperCAmelCase : Optional[int] = vocab_size
__UpperCAmelCase : Any = hidden_size
__UpperCAmelCase : str = embedding_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : int = num_attention_heads
__UpperCAmelCase : List[str] = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : Any = hidden_dropout_prob
__UpperCAmelCase : List[str] = attention_probs_dropout_prob
__UpperCAmelCase : Dict = max_position_embeddings
__UpperCAmelCase : Tuple = type_vocab_size
__UpperCAmelCase : Dict = type_sequence_label_size
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Tuple = num_labels
__UpperCAmelCase : List[str] = num_choices
__UpperCAmelCase : Optional[int] = scope
def A_ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : int = None
if self.use_input_mask:
__UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase : List[Any] = None
if self.use_token_type_ids:
__UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase : Dict = None
__UpperCAmelCase : str = None
__UpperCAmelCase : Dict = None
if self.use_labels:
__UpperCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase : Dict = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self : List[Any] ):
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , )
def A_ ( self : List[str] , __lowercase : Optional[int] , __lowercase : List[str] , __lowercase : List[Any] , __lowercase : List[str] , __lowercase : List[Any] , __lowercase : Dict , __lowercase : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = MobileBertModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : List[str] = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
__UpperCAmelCase : int = model(__lowercase , token_type_ids=__lowercase )
__UpperCAmelCase : Tuple = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A_ ( self : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Tuple , __lowercase : int , __lowercase : Optional[int] , __lowercase : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Any = MobileBertForMaskedLM(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : Tuple = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self : Dict , __lowercase : str , __lowercase : Dict , __lowercase : Tuple , __lowercase : Dict , __lowercase : List[Any] , __lowercase : Any , __lowercase : str ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = MobileBertForNextSentencePrediction(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : Tuple = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def A_ ( self : List[Any] , __lowercase : List[Any] , __lowercase : Any , __lowercase : List[str] , __lowercase : Optional[Any] , __lowercase : Optional[int] , __lowercase : int , __lowercase : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = MobileBertForPreTraining(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : List[str] = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , next_sentence_label=__lowercase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def A_ ( self : str , __lowercase : List[Any] , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : Union[str, Any] , __lowercase : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = MobileBertForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : str = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A_ ( self : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : Tuple , __lowercase : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : List[str] , __lowercase : List[str] , __lowercase : int ):
'''simple docstring'''
__UpperCAmelCase : int = self.num_labels
__UpperCAmelCase : Any = MobileBertForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : Tuple = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A_ ( self : Dict , __lowercase : int , __lowercase : Tuple , __lowercase : List[Any] , __lowercase : Tuple , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Any ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.num_labels
__UpperCAmelCase : int = MobileBertForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : int = model(__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self : Tuple , __lowercase : str , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : List[str] , __lowercase : Any , __lowercase : Union[str, Any] , __lowercase : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.num_choices
__UpperCAmelCase : List[Any] = MobileBertForMultipleChoice(config=__lowercase )
model.to(__lowercase )
model.eval()
__UpperCAmelCase : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase : List[str] = model(
__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A_ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) : Dict = config_and_inputs
__UpperCAmelCase : Any = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class snake_case ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : Dict = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
_A : Tuple = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : Dict = True
def A_ ( self : Any , __lowercase : List[Any] , __lowercase : Dict , __lowercase : Union[str, Any]=False ):
'''simple docstring'''
__UpperCAmelCase : Any = super()._prepare_for_class(__lowercase , __lowercase , return_labels=__lowercase )
if return_labels:
if model_class in get_values(__lowercase ):
__UpperCAmelCase : int = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowercase )
__UpperCAmelCase : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
return inputs_dict
def A_ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : str = MobileBertModelTester(self )
__UpperCAmelCase : str = ConfigTester(self , config_class=__lowercase , hidden_size=37 )
def A_ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A_ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__lowercase )
def A_ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowercase )
def A_ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowercase )
def A_ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowercase )
def A_ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowercase )
def A_ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowercase )
def A_ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowercase )
def A_ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowercase )
def lowerCamelCase_ ( UpperCAmelCase_ ) -> List[Any]:
"""simple docstring"""
return torch.tensor(
UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ , )
lowercase__ :Dict = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def A_ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = MobileBertModel.from_pretrained('''google/mobilebert-uncased''' ).to(__lowercase )
__UpperCAmelCase : Optional[int] = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] )
with torch.no_grad():
__UpperCAmelCase : str = model(__lowercase )[0]
__UpperCAmelCase : Any = torch.Size((1, 9, 512) )
self.assertEqual(output.shape , __lowercase )
__UpperCAmelCase : int = torch.tensor(
[
[
[-2.473_6526e07, 8.269_1656e04, 1.652_1838e05],
[-5.754_1704e-01, 3.905_6022e00, 4.401_1507e00],
[2.604_7359e00, 1.567_7652e00, -1.732_4188e-01],
]
] , device=__lowercase , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # difference of ~1, so it is not a good idea to measure closeness with an additive tolerance.
        # Here, we instead divide the expected result by the actual result in order to obtain ~1. We then check that
        # the result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
__UpperCAmelCase : str = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
__UpperCAmelCase : List[Any] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
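# --- Illustrative sketch (standalone; the tensors here are hypothetical) ------
# The ratio-bound comparison used above, in isolation: for outputs spanning
# many orders of magnitude, check expected / actual against 1 +/- TOLERANCE
# rather than using an additive allclose. The 1e-3 bound mirrors the TOLERANCE
# constant defined above.
import torch
_expected = torch.tensor([1.0e8, 2.5e0, -3.0e-1])
_actual = _expected * (1 + 5e-4)  # a simulated model output within tolerance
_ratio = _expected / _actual
assert torch.all(_ratio >= 1 - 1e-3) and torch.all(_ratio <= 1 + 1e-3)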
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
__UpperCamelCase : int = logging.get_logger(__name__)
def A ( _lowercase , _lowercase , _lowercase , _lowercase=None , _lowercase=None ):
# Recurse if needed
if "." in tensor_name:
SCREAMING_SNAKE_CASE : Dict = tensor_name.split('''.''' )
for split in splits[:-1]:
SCREAMING_SNAKE_CASE : int = getattr(_lowercase , _lowercase )
if new_module is None:
raise ValueError(f"""{module} has no attribute {split}.""" )
SCREAMING_SNAKE_CASE : List[str] = new_module
SCREAMING_SNAKE_CASE : Dict = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(f"""{module} does not have a parameter or a buffer named {tensor_name}.""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = tensor_name in module._buffers
SCREAMING_SNAKE_CASE : Optional[Any] = getattr(_lowercase , _lowercase )
if old_value.device == torch.device('''meta''' ) and device not in ["meta", torch.device('''meta''' )] and value is None:
raise ValueError(f"""{tensor_name} is on the meta device, we need a `value` to put in on {device}.""" )
SCREAMING_SNAKE_CASE : Optional[int] = False
SCREAMING_SNAKE_CASE : Dict = False
if is_buffer or not is_bitsandbytes_available():
SCREAMING_SNAKE_CASE : List[str] = False
SCREAMING_SNAKE_CASE : List[str] = False
else:
SCREAMING_SNAKE_CASE : Dict = hasattr(bnb.nn , '''Params4bit''' ) and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit )
SCREAMING_SNAKE_CASE : Any = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams )
if is_abit or is_abit:
SCREAMING_SNAKE_CASE : List[str] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
SCREAMING_SNAKE_CASE : Any = old_value.to(_lowercase )
elif isinstance(_lowercase , torch.Tensor ):
SCREAMING_SNAKE_CASE : List[Any] = value.to('''cpu''' )
if value.dtype == torch.inta:
SCREAMING_SNAKE_CASE : List[Any] = version.parse(importlib.metadata.version('''bitsandbytes''' ) ) > version.parse(
'''0.37.2''' )
if not is_abit_serializable:
raise ValueError(
'''Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '''
'''Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.''' )
else:
SCREAMING_SNAKE_CASE : Dict = torch.tensor(_lowercase , device='''cpu''' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , _lowercase ) and fpaa_statistics is None:
SCREAMING_SNAKE_CASE : Optional[int] = new_value.T
SCREAMING_SNAKE_CASE : Dict = old_value.__dict__
if is_abit:
SCREAMING_SNAKE_CASE : List[Any] = bnb.nn.IntaParams(_lowercase , requires_grad=_lowercase , **_lowercase ).to(_lowercase )
elif is_abit:
SCREAMING_SNAKE_CASE : str = bnb.nn.Paramsabit(_lowercase , requires_grad=_lowercase , **_lowercase ).to(_lowercase )
SCREAMING_SNAKE_CASE : Tuple = new_value
if fpaa_statistics is not None:
setattr(module.weight , '''SCB''' , fpaa_statistics.to(_lowercase ) )
else:
if value is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = old_value.to(_lowercase )
elif isinstance(_lowercase , torch.Tensor ):
SCREAMING_SNAKE_CASE : List[Any] = value.to(_lowercase )
else:
SCREAMING_SNAKE_CASE : List[Any] = torch.tensor(_lowercase , device=_lowercase )
if is_buffer:
SCREAMING_SNAKE_CASE : List[Any] = new_value
else:
SCREAMING_SNAKE_CASE : Dict = nn.Parameter(_lowercase , requires_grad=old_value.requires_grad )
SCREAMING_SNAKE_CASE : Dict = new_value
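# --- Illustrative note (comments only, added for clarity) ---------------------
# Example of the dotted-name resolution at the top of the function above: for
# tensor_name "transformer.h.0.attn.weight", the loop getattr-walks
# model.transformer -> .h -> "0" (ModuleList children resolve via getattr)
# -> .attn, then targets the final "weight" entry in that submodule's
# parameters or buffers.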
def A ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=None , _lowercase=False ):
for name, module in model.named_children():
if current_key_name is None:
SCREAMING_SNAKE_CASE : List[str] = []
current_key_name.append(_lowercase )
if (isinstance(_lowercase , nn.Linear ) or isinstance(_lowercase , _lowercase )) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in '''.'''.join(_lowercase ) for key in modules_to_not_convert ):
with init_empty_weights():
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = module.weight.shape
else:
SCREAMING_SNAKE_CASE : Tuple = module.in_features
SCREAMING_SNAKE_CASE : Optional[int] = module.out_features
if quantization_config.quantization_method() == "llm_int8":
SCREAMING_SNAKE_CASE : str = bnb.nn.LinearabitLt(
_lowercase , _lowercase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
SCREAMING_SNAKE_CASE : Optional[int] = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
SCREAMING_SNAKE_CASE : List[Any] = bnb.nn.Linearabit(
_lowercase , _lowercase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
SCREAMING_SNAKE_CASE : int = True
# Store the module class in case we need to transpose the weight later
SCREAMING_SNAKE_CASE : Union[str, Any] = type(_lowercase )
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(_lowercase )
if len(list(module.children() ) ) > 0:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = _replace_with_bnb_linear(
_lowercase , _lowercase , _lowercase , _lowercase , has_been_replaced=_lowercase , )
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def A ( _lowercase , _lowercase=None , _lowercase=None , _lowercase=None ):
SCREAMING_SNAKE_CASE : Union[str, Any] = ['''lm_head'''] if modules_to_not_convert is None else modules_to_not_convert
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = _replace_with_bnb_linear(
_lowercase , _lowercase , _lowercase , _lowercase )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def A ( *_lowercase , **_lowercase ):
warnings.warn(
'''`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead''' , _lowercase , )
return replace_with_bnb_linear(*_lowercase , **_lowercase )
def A ( *_lowercase , **_lowercase ):
warnings.warn(
'''`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead''' , _lowercase , )
return set_module_quantized_tensor_to_device(*_lowercase , **_lowercase )
def A ( _lowercase ):
SCREAMING_SNAKE_CASE : Tuple = deepcopy(_lowercase ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
SCREAMING_SNAKE_CASE : str = find_tied_parameters(_lowercase )
# For compatibility with Accelerate < 0.18
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE : Union[str, Any] = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
SCREAMING_SNAKE_CASE : Optional[int] = sum(_lowercase , [] )
SCREAMING_SNAKE_CASE : Optional[int] = len(_lowercase ) > 0
# Check if it is a base model
SCREAMING_SNAKE_CASE : List[Any] = not hasattr(_lowercase , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
SCREAMING_SNAKE_CASE : List[Any] = list(model.named_children() )
SCREAMING_SNAKE_CASE : Tuple = [list_modules[-1][0]]
# add last module together with tied weights
SCREAMING_SNAKE_CASE : Union[str, Any] = set(_lowercase ) - set(_lowercase )
SCREAMING_SNAKE_CASE : List[Any] = list(set(_lowercase ) ) + list(_lowercase )
# remove ".weight" from the keys
SCREAMING_SNAKE_CASE : int = ['''.weight''', '''.bias''']
SCREAMING_SNAKE_CASE : Union[str, Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
SCREAMING_SNAKE_CASE : int = name.replace(_lowercase , '''''' )
filtered_module_names.append(_lowercase )
return filtered_module_names
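# --- Hedged usage sketch (comments only; not part of this module) -------------
# In upstream transformers these helpers are exposed as replace_with_bnb_linear
# and get_keys_to_not_convert. A typical flow, assuming a loaded `model` and a
# BitsAndBytesConfig-style `quantization_config`:
#   modules_to_skip = get_keys_to_not_convert(model)      # e.g. ["lm_head"]
#   model = replace_with_bnb_linear(
#       model,
#       modules_to_not_convert=modules_to_skip,
#       quantization_config=quantization_config,
#   )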
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : Optional[Any] = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = """deberta-v2"""
def __init__( self : Optional[Any] , UpperCamelCase__ : Any=12_8100 , UpperCamelCase__ : Optional[int]=1536 , UpperCamelCase__ : Dict=24 , UpperCamelCase__ : List[str]=24 , UpperCamelCase__ : Tuple=6144 , UpperCamelCase__ : List[Any]="gelu" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Optional[int]=512 , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : List[Any]=1E-7 , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : str=-1 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : str="gelu" , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = hidden_size
SCREAMING_SNAKE_CASE : int = num_hidden_layers
SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE : Any = hidden_act
SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE : str = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = type_vocab_size
SCREAMING_SNAKE_CASE : str = initializer_range
SCREAMING_SNAKE_CASE : Optional[Any] = relative_attention
SCREAMING_SNAKE_CASE : Optional[Any] = max_relative_positions
SCREAMING_SNAKE_CASE : Optional[int] = pad_token_id
SCREAMING_SNAKE_CASE : Optional[Any] = position_biased_input
# Backwards compatibility
if type(UpperCamelCase__ ) == str:
SCREAMING_SNAKE_CASE : Optional[int] = [x.strip() for x in pos_att_type.lower().split('''|''' )]
SCREAMING_SNAKE_CASE : Any = pos_att_type
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Dict = layer_norm_eps
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''pooler_hidden_size''' , UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Any = pooler_dropout
SCREAMING_SNAKE_CASE : Union[str, Any] = pooler_hidden_act
class lowercase__ ( UpperCamelCase_):
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE : Optional[int] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE : Optional[Any] = {0: '''batch''', 1: '''sequence'''}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] )
else:
return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] )
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return 12
def __A ( self : Dict , UpperCamelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : "PreTrainedTokenizerBase" = None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[Any] = super().generate_dummy_inputs(preprocessor=UpperCamelCase__ , framework=UpperCamelCase__ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
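# --- Hedged usage sketch (comments only; upstream class names assumed) --------
# The OnnxConfig above drops token_type_ids from its dummy inputs whenever
# type_vocab_size == 0. A typical dummy-input flow, assuming the upstream names
# DebertaV2Config and DebertaV2OnnxConfig plus a compatible tokenizer:
#   onnx_config = DebertaV2OnnxConfig(DebertaV2Config(), task="sequence-classification")
#   dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)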
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
_A : List[str] = False
_A : str = False
def UpperCamelCase_ ( snake_case_ : Namespace ) -> int:
'''simple docstring'''
return TrainCommand(snake_case_ )
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
@staticmethod
def a ( SCREAMING_SNAKE_CASE__ : ArgumentParser ) -> Any:
__lowerCAmelCase = parser.add_parser("""train""" , help="""CLI tool to train a model on a task.""" )
train_parser.add_argument(
"""--train_data""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.""" , )
train_parser.add_argument(
"""--column_label""" , type=SCREAMING_SNAKE_CASE__ , default=0 , help="""Column of the dataset csv file with example labels.""" )
train_parser.add_argument(
"""--column_text""" , type=SCREAMING_SNAKE_CASE__ , default=1 , help="""Column of the dataset csv file with example texts.""" )
train_parser.add_argument(
"""--column_id""" , type=SCREAMING_SNAKE_CASE__ , default=2 , help="""Column of the dataset csv file with example ids.""" )
train_parser.add_argument(
"""--skip_first_row""" , action="""store_true""" , help="""Skip the first row of the csv file (headers).""" )
train_parser.add_argument("""--validation_data""" , type=SCREAMING_SNAKE_CASE__ , default="""""" , help="""path to validation dataset.""" )
train_parser.add_argument(
"""--validation_split""" , type=SCREAMING_SNAKE_CASE__ , default=0.1 , help="""if validation dataset is not provided, fraction of train dataset to use as validation dataset.""" , )
train_parser.add_argument("""--output""" , type=SCREAMING_SNAKE_CASE__ , default="""./""" , help="""path to saved the trained model.""" )
train_parser.add_argument(
"""--task""" , type=SCREAMING_SNAKE_CASE__ , default="""text_classification""" , help="""Task to train the model on.""" )
train_parser.add_argument(
"""--model""" , type=SCREAMING_SNAKE_CASE__ , default="""bert-base-uncased""" , help="""Model's name or path to stored model.""" )
train_parser.add_argument("""--train_batch_size""" , type=SCREAMING_SNAKE_CASE__ , default=32 , help="""Batch size for training.""" )
train_parser.add_argument("""--valid_batch_size""" , type=SCREAMING_SNAKE_CASE__ , default=64 , help="""Batch size for validation.""" )
train_parser.add_argument("""--learning_rate""" , type=SCREAMING_SNAKE_CASE__ , default=3e-5 , help="""Learning rate.""" )
train_parser.add_argument("""--adam_epsilon""" , type=SCREAMING_SNAKE_CASE__ , default=1e-0_8 , help="""Epsilon for Adam optimizer.""" )
train_parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
def __init__( self : Any , SCREAMING_SNAKE_CASE__ : Namespace ) -> Union[str, Any]:
__lowerCAmelCase = logging.get_logger("""transformers-cli/training""" )
__lowerCAmelCase = """tf""" if is_tf_available() else """torch"""
os.makedirs(args.output , exist_ok=SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = args.output
__lowerCAmelCase = args.column_label
__lowerCAmelCase = args.column_text
__lowerCAmelCase = args.column_id
self.logger.info(f"""Loading {args.task} pipeline for {args.model}""" )
if args.task == "text_classification":
__lowerCAmelCase = TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(f"""Loading dataset from {args.train_data}""" )
__lowerCAmelCase = Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__lowerCAmelCase = None
if args.validation_data:
self.logger.info(f"""Loading validation dataset from {args.validation_data}""" )
__lowerCAmelCase = Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
__lowerCAmelCase = args.validation_split
__lowerCAmelCase = args.train_batch_size
__lowerCAmelCase = args.valid_batch_size
__lowerCAmelCase = args.learning_rate
__lowerCAmelCase = args.adam_epsilon
def a ( self : str ) -> List[Any]:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def a ( self : Dict ) -> str:
raise NotImplementedError
def a ( self : str ) -> Dict:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output )
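# --- Hedged usage sketch (comments only) --------------------------------------
# This command is registered under the name "train", so with the classic
# transformers CLI it would be invoked roughly as follows (the flags mirror the
# add_argument calls above):
#   transformers-cli train --train_data train.csv --column_label 0 \
#       --column_text 1 --output ./out --task text_classification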
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A : Optional[int] = {
'''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
'''tokenization_luke''': ['''LukeTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Optional[Any] = [
'''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LukeForEntityClassification''',
'''LukeForEntityPairClassification''',
'''LukeForEntitySpanClassification''',
'''LukeForMultipleChoice''',
'''LukeForQuestionAnswering''',
'''LukeForSequenceClassification''',
'''LukeForTokenClassification''',
'''LukeForMaskedLM''',
'''LukeModel''',
'''LukePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
_A : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_nezha': ['NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'NezhaConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST',
'NezhaForNextSentencePrediction',
'NezhaForMaskedLM',
'NezhaForPreTraining',
'NezhaForMultipleChoice',
'NezhaForQuestionAnswering',
'NezhaForSequenceClassification',
'NezhaForTokenClassification',
'NezhaModel',
'NezhaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
_snake_case = DebertaVaTokenizer
_snake_case = DebertaVaTokenizerFast
_snake_case = True
_snake_case = True
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
_lowercase : Dict = DebertaVaTokenizer(lowerCamelCase_ , unk_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : int ):
"""simple docstring"""
_lowercase : Optional[Any] = 'this is a test'
_lowercase : int = 'this is a test'
return input_text, output_text
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_lowercase : Optional[int] = '<pad>'
_lowercase : List[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCamelCase_ ) , lowerCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCamelCase_ ) , lowerCamelCase_ )
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_lowercase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '[PAD]' )
self.assertEqual(len(lowerCamelCase_ ) , 3_0_0_0_1 )
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 3_0_0_0_0 )
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
        # fmt: off
        _lowercase : Optional[int] = ' \tHeLLo!how \n Are yoU? '
_lowercase : Optional[int] = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
_lowercase : Optional[int] = DebertaVaTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ )
_lowercase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : int = DebertaVaTokenizerFast(lowerCamelCase_ , do_lower_case=lowerCamelCase_ )
_lowercase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.' )
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
pass
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
        # fmt: off
        _lowercase : List[Any] = 'I was born in 92000, and this is falsé.'
_lowercase : int = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
_lowercase : Union[str, Any] = DebertaVaTokenizer(lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
_lowercase : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : Optional[int] = DebertaVaTokenizerFast(lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
_lowercase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
        # fmt: off
        _lowercase : List[Any] = 'I was born in 92000, and this is falsé.'
_lowercase : Dict = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
_lowercase : Union[str, Any] = DebertaVaTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
_lowercase : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : Dict = DebertaVaTokenizerFast(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
_lowercase : List[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
        # fmt: off
        _lowercase : Optional[Any] = 'I was born in 92000, and this is falsé.'
_lowercase : List[str] = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
_lowercase : Union[str, Any] = DebertaVaTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
_lowercase : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : Union[str, Any] = DebertaVaTokenizerFast(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
_lowercase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
        # fmt: off
        _lowercase : Dict = 'I was born in 92000, and this is falsé.'
_lowercase : Union[str, Any] = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
_lowercase : Optional[int] = DebertaVaTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
_lowercase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : Tuple = DebertaVaTokenizerFast(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
_lowercase : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
        # fmt: off
        _lowercase : Optional[Any] = ' \tHeLLo!how \n Are yoU? '
_lowercase : int = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
# fmt: on
_lowercase : Dict = DebertaVaTokenizer(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
_lowercase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : str = DebertaVaTokenizerFast(lowerCamelCase_ , do_lower_case=lowerCamelCase_ , split_by_punct=lowerCamelCase_ )
_lowercase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_lowercase : int = self.get_tokenizer()
_lowercase : str = self.get_rust_tokenizer()
_lowercase : Optional[int] = 'I was born in 92000, and this is falsé.'
_lowercase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
_lowercase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : List[str] = tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
_lowercase : List[Any] = rust_tokenizer.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
_lowercase : List[Any] = self.get_rust_tokenizer()
_lowercase : Tuple = tokenizer.encode(lowerCamelCase_ )
_lowercase : int = rust_tokenizer.encode(lowerCamelCase_ )
self.assertListEqual(lowerCamelCase_ , lowerCamelCase_ )
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        # `keep_accents=True` is inferred from the accented 'é' surviving in the
        # second `tokens_target` below
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_a = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id],
            encoded_pair,
        )
@slow
    def test_tokenizer_integration(self):
        # fmt: off
_lowercase : Any = {'input_ids': [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_lowercase, model_name="microsoft/deberta-v2-xlarge", revision="ad6e42c1532ddf3a15c39246b63f5559d558b670", )
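# A minimal parity sketch distilled from the tests above, assuming the
# "microsoft/deberta-v3-base" checkpoint (any DeBERTa-v2/v3 checkpoint works)
# and the tokenizer classes already imported at the top of this test file.
def example_slow_fast_tokenizer_parity():
    slow = DebertaVaTokenizer.from_pretrained("microsoft/deberta-v3-base")
    fast = DebertaVaTokenizerFast.from_pretrained("microsoft/deberta-v3-base")
    text = "I was born in 92000, and this is falsé."
    # the slow (sentencepiece) and fast (Rust) tokenizers must agree on both
    # the token ids and the decoded token strings
    assert slow.encode(text) == fast.encode(text)
    assert slow.convert_ids_to_tokens(slow.encode(text)) == fast.convert_ids_to_tokens(fast.encode(text))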
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """Combines a SpeechT5 feature extractor and a SpeechT5 tokenizer into a single processor."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """Processes audio and text inputs, as well as audio and text targets."""
        audio = kwargs.pop("audio", None)
        text = kwargs.pop("text", None)
        text_target = kwargs.pop("text_target", None)
        audio_target = kwargs.pop("audio_target", None)
        sampling_rate = kwargs.pop("sampling_rate", None)

        if audio is not None and text is not None:
            raise ValueError(
                "Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?"
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                "Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?"
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                "You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process."
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets["input_values"]
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets["input_ids"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        """Collates audio and text inputs, as well as their targets, into a padded batch."""
        input_values = kwargs.pop("input_values", None)
        input_ids = kwargs.pop("input_ids", None)
        labels = kwargs.pop("labels", None)

        if input_values is not None and input_ids is not None:
            raise ValueError("Cannot process both `input_values` and `input_ids` inputs.")
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                "You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded."
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets["input_ids"]
            else:
                # the feature extractor pads mel spectrograms, so temporarily
                # swap in the number of mel bins as the feature size
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets["input_values"]
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs["labels"] = labels

            decoder_attention_mask = targets.get("attention_mask")
            if decoder_attention_mask is not None:
                inputs["decoder_attention_mask"] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
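# A minimal usage sketch for the processor above, assuming the
# "microsoft/speecht5_asr" checkpoint and a 16 kHz waveform; `labels` ends up
# holding the tokenized transcription, ready for an ASR training step.
def example_speecht5_processor_usage():
    import numpy as np
    from transformers import SpeechT5Processor

    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_asr")
    waveform = np.zeros(16000, dtype=np.float32)  # 1 second of silence
    batch = processor(audio=waveform, text_target="hello world", sampling_rate=16000, return_tensors="pt")
    # `input_values` come from the feature extractor, `labels` from the tokenizer
    print(batch["input_values"].shape, batch["labels"].shape)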
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
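# The helpers exercised above dispatch on the input's framework, so a single
# call site can serve NumPy, PyTorch, TensorFlow, and JAX alike. A minimal
# sketch using the names already imported at the top of this file:
def example_framework_agnostic_ops():
    x = np.arange(12).reshape(3, 4)
    assert transpose(x).shape == (4, 3)        # equivalent to x.transpose()
    assert reshape(x, (2, 6)).shape == (2, 6)  # equivalent to np.reshape(x, (2, 6))
    assert expand_dims(x, axis=0).shape == (1, 3, 4)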
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """Depth estimation pipeline using any `AutoModelForDepthEstimation`: predicts the depth of an image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
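# A minimal usage sketch for the pipeline above, assuming the "Intel/dpt-large"
# depth-estimation checkpoint; `depth` is a PIL image and `predicted_depth` the
# raw tensor produced by the model.
def example_depth_estimation():
    from transformers import pipeline

    depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
    result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
    result["depth"].save("depth.png")
    print(result["predicted_depth"].shape)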
"""
Counts the ways a row of length `length` can be filled with red blocks of
minimum length 3, separated by at least one black square (the block-counting
recurrence of Project Euler problem 114).
"""


def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[row_length - block_start - block_length - 1]

            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(F'''{solution() = }''')
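# A brute-force cross-check of the recurrence above for small rows: enumerate
# every 0/1 row and accept it when every maximal run of 1s (a red block) has
# length >= 3. Runs are maximal, so the "separated by at least one black
# square" condition holds by construction. Exponential; for tiny lengths only.
def brute_force(length: int) -> int:
    from itertools import product

    count = 0
    for row in product((0, 1), repeat=length):
        runs = [run for run in "".join(map(str, row)).split("0") if run]
        if all(len(run) >= 3 for run in runs):
            count += 1
    return count


# sanity check: brute_force(n) == solution(n) for n in range(1, 15)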
'''simple docstring'''
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(
        self, model_name: str, batch_size: int, sequence_length: int
    ) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")

        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")

        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )

                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    r"""
    Abstract base class for all constraints that can be applied during generation.
    """

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """
        Tests whether this constraint has been properly defined.
        """
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
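# A minimal custom constraint sketch against the interface above: force a
# single token id to appear. Like the concrete subclasses below, it bypasses
# `Constraint.__init__` (and hence `self.test()`), since running the self-test
# would step the constraint to completion during construction.
class SingleTokenConstraint(Constraint):
    def __init__(self, token_id: int):
        super(Constraint, self).__init__()
        self.token_id = token_id
        self.seqlen = 1
        self.completed = False

    def advance(self):
        return None if self.completed else self.token_id

    def does_advance(self, token_id: int):
        return not self.completed and token_id == self.token_id

    def update(self, token_id: int):
        if self.does_advance(token_id):
            self.completed = True
            return True, True, False  # stepped, completed, reset
        self.reset()
        return False, False, True

    def reset(self):
        self.completed = False

    def remaining(self):
        return 0 if self.completed else 1

    def copy(self, stateful=False):
        new_constraint = SingleTokenConstraint(self.token_id)
        if stateful:
            new_constraint.completed = self.completed
        return new_constraint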
class PhrasalConstraint(Constraint):
    r"""
    [`Constraint`] enforcing that an ordered sequence of tokens is included in the output.
    """

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
class DisjunctiveTrie:
    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        r"""
        A helper class that builds a trie with the words represented in `nested_token_ids`.
        """
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        """
        The next possible tokens that will progress the trie, given the current sequence of tokens in `current_seq`.
        """
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """
        Returns whether # of leaves == # of words. Otherwise some word is a subset of another.
        """
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    r"""
    A special [`Constraint`] that is fulfilled by fulfilling just one of several constraints.
    """

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
class ConstraintListState:
    r"""
    A class for beam scorers to track its progress through a list of constraints.
    """

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress to the
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to the completed list and set
                #    inprogress to None. If there are no pending constraints either, then this full list of
                #    constraints is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never modify self.constraints objects
        # throughout this process, so it's at initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
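# A minimal generation sketch wiring the constraints above into
# `model.generate`, assuming the "t5-small" checkpoint; constrained decoding
# requires beam search (`num_beams > 1`), and the forced phrase is tokenized
# without special tokens so only its content ids are constrained.
def example_constrained_generation():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("t5-small")
    model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

    forced_ids = tokenizer("wunderbar", add_special_tokens=False).input_ids
    constraint = PhrasalConstraint(forced_ids)

    inputs = tokenizer("translate English to German: The weather is wonderful.", return_tensors="pt")
    outputs = model.generate(**inputs, constraints=[constraint], num_beams=4)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))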
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': AlbertModel,
'''fill-mask''': AlbertForMaskedLM,
'''question-answering''': AlbertForQuestionAnswering,
'''text-classification''': AlbertForSequenceClassification,
'''token-classification''': AlbertForTokenClassification,
'''zero-shot''': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class A ( unittest.TestCase ):
@slow
def lowercase__ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase_ = AlbertModel.from_pretrained('albert-base-v2' )
UpperCamelCase_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
UpperCamelCase_ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
UpperCamelCase_ = model(__UpperCAmelCase , attention_mask=__UpperCAmelCase )[0]
UpperCamelCase_ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __UpperCAmelCase )
UpperCamelCase_ = torch.tensor(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCAmelCase , atol=1E-4 ) )
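
# Note: the @slow-marked tests above only run when the RUN_SLOW=1 environment
# variable is set, per the standard transformers testing convention.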
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
_DESCRIPTION = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )

        return {"mse": mse}
"""Deterministic Miller-Rabin primality test, valid for all n below
3,317,044,064,679,887,385,961,981 (above that bound the test is probabilistic)."""


def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Return True if n is prime (or a probable prime above the deterministic bound)."""
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    plist = primes[:]  # fall back to all witnesses when n exceeds every bound (probabilistic mode)
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n - 1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
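
# Quick sanity check for the function above (values easy to verify by hand):
#   miller_rabin(97)   -> True   (97 is prime)
#   miller_rabin(100)  -> False  (100 = 2**2 * 5**2)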
def test_miller_rabin() -> None:
    """Check composite/prime pairs just below each deterministic threshold."""
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838_201)
    assert miller_rabin(838_207)
    # 1_373_653
    assert not miller_rabin(17_316_001)
    assert miller_rabin(17_316_017)
    # 25_326_001
    assert not miller_rabin(3_078_386_641)
    assert miller_rabin(3_078_386_653)
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801)
    assert miller_rabin(1_713_045_574_819)
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307)
    assert miller_rabin(2_779_799_728_327)
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441)
    assert miller_rabin(113_850_023_909_527)
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351)
    assert miller_rabin(1_275_041_018_848_804_391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867)
    assert miller_rabin(79_666_464_458_507_787_791_951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333)
    assert miller_rabin(552_840_677_446_647_897_660_359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for probabilistic test


if __name__ == "__main__":
    test_miller_rabin()
"""Strand sort: repeatedly pull an increasing "strand" out of the input and
merge it into the solution list."""
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
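
# Complexity note: strand sort is O(n^2) in the worst case (one strand per
# element) and close to O(n) when the input already forms a single strand.
# Example: strand_sort([10, 5, 30, 40, 2, 4, 9]) -> [2, 4, 5, 9, 10, 30, 40]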
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_clip": [
"CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPConfig",
"CLIPOnnxConfig",
"CLIPTextConfig",
"CLIPVisionConfig",
],
"processing_clip": ["CLIPProcessor"],
"tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ["CLIPFeatureExtractor"]
UpperCamelCase__ = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPModel",
"CLIPPreTrainedModel",
"CLIPTextModel",
"CLIPTextModelWithProjection",
"CLIPVisionModel",
"CLIPVisionModelWithProjection",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFCLIPModel",
"TFCLIPPreTrainedModel",
"TFCLIPTextModel",
"TFCLIPVisionModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"FlaxCLIPModel",
"FlaxCLIPPreTrainedModel",
"FlaxCLIPTextModel",
"FlaxCLIPTextPreTrainedModel",
"FlaxCLIPVisionModel",
"FlaxCLIPVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
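
# Note: thanks to the `_LazyModule` indirection above, `import
# transformers.models.clip` stays cheap; the torch/TF/Flax submodules are only
# imported the first time one of their attributes is actually accessed.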
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probably targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
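
# Note: `ANY(...)` (imported from .test_pipelines_common) compares equal to any
# value of the given type, so the assertions above pin the output schema
# without depending on the random tiny-model weights.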
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}
class BigBirdTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token,
            sep_token=sep_token, mask_token=mask_token, cls_token=cls_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None  # SentencePiece objects are not picklable
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def _decode(
        self,
        token_ids: List[int],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        spaces_between_special_tokens: bool = True,
        **kwargs,
    ) -> str:
        self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False)

        filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)

        # To avoid mixing byte-level and unicode for byte-level BPT
        # we need to build string separately for added tokens and byte-level tokens
        # cf. https://github.com/huggingface/transformers/issues/1133
        sub_texts = []
        current_sub_text = []
        for token in filtered_tokens:
            if skip_special_tokens and token in self.all_special_ids:
                continue
            if token in self.added_tokens_encoder:
                if current_sub_text:
                    sub_texts.append(self.convert_tokens_to_string(current_sub_text))
                    current_sub_text = []
                sub_texts.append(token)
            else:
                current_sub_text.append(token)
        if current_sub_text:
            sub_texts.append(self.convert_tokens_to_string(current_sub_text))

        # Mimic the behavior of the Rust tokenizer:
        # No space before [MASK] and [SEP]
        if spaces_between_special_tokens:
            text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts))
        else:
            text = "".join(sub_texts)

        clean_up_tokenization_spaces = (
            clean_up_tokenization_spaces
            if clean_up_tokenization_spaces is not None
            else self.clean_up_tokenization_spaces
        )
        if clean_up_tokenization_spaces:
            clean_text = self.clean_up_tokenization(text)
            return clean_text
        else:
            return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
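
# Minimal usage sketch (requires network access to fetch the SentencePiece
# model; the checkpoint name matches the vocab map above):
#
#   tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
#   ids = tokenizer("Paris is the capital of France.")["input_ids"]
#   text = tokenizer.decode(ids)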
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation
def a (self : List[str] , a__ : "torch.Tensor" ):
"""simple docstring"""
if self.framework != "pt":
raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' )
__snake_case , __snake_case , __snake_case , __snake_case = box.int().tolist()
__snake_case = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
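
# Minimal usage sketch (the checkpoint name is illustrative, not pinned by
# this file):
#
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
#   # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]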
"""simple docstring"""
def A( snake_case_ ):
"""simple docstring"""
if not isinstance(snake_case_ , snake_case_ ) or number < 0:
raise ValueError("Input must be a non-negative integer" )
lowercase__: Any = 0
while number:
# This way we arrive at next set bit (next 1) instead of looping
# through each bit and checking for 1s hence the
# loop won't run 32 times it will only run the number of `1` times
number &= number - 1
count += 1
return count
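
# Python 3.10+ exposes the same count as a built-in: (25).bit_count() == 3.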
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""RoBERTa model configuration (plus its ONNX export configuration)."""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig(PretrainedConfig):
    model_type = "roberta"

    def __init__(
        self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute",
        use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
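
# The dynamic axes above mark the batch and sequence dimensions (plus the
# choice dimension for multiple-choice heads) as variable-length, so a single
# exported ONNX graph can serve any batch size and sequence length.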
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """Duplicates writes to stdout into a log file (tee-style)."""

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
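
# Tee mirrors everything written to stdout into the report file while the
# benchmark runs, so the console view and the saved report stay in sync.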
def get_original_command(max_width=80, full_python_path=False):
    """Reconstruct the command line that launched this script, wrapped to max_width."""
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id, cmd, variation_key, variation, longest_variation_len,
    target_metric_key, report_metric_keys, repeat_times, output_dir, verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"\nDatetime    : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch       : {torch.__version__}\ncuda        : {torch.version.cuda}\npython      : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def _lowerCamelCase ( ):
lowerCamelCase :Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--base-cmd''' , default=a_ , type=a_ , required=a_ , help='''Base cmd''' , )
parser.add_argument(
'''--variations''' , default=a_ , type=a_ , nargs='''+''' , required=a_ , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , )
parser.add_argument(
'''--base-variation''' , default=a_ , type=a_ , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , )
parser.add_argument(
'''--target-metric-key''' , default=a_ , type=a_ , required=a_ , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , )
parser.add_argument(
'''--report-metric-keys''' , default='''''' , type=a_ , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples\'''' , )
parser.add_argument(
'''--repeat-times''' , default=1 , type=a_ , help='''How many times to re-run each variation - an average will be reported''' , )
parser.add_argument(
'''--output_dir''' , default='''output_benchmark''' , type=a_ , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , )
parser.add_argument(
'''--verbose''' , default=a_ , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , )
lowerCamelCase :Tuple = parser.parse_args()
lowerCamelCase :Dict = args.output_dir
Path(a_).mkdir(exist_ok=a_)
lowerCamelCase :List[Any] = get_base_command(a_ , a_)
# split each dimension into its --foo variations
lowerCamelCase :int = [list(map(str.strip , re.split(R'''\|''' , a_))) for x in args.variations]
# build a cartesian product of dimensions and convert those back into cmd-line arg strings,
# while stripping white space for inputs that were empty
lowerCamelCase :List[str] = list(map(str.strip , map(''' '''.join , itertools.product(*a_))))
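# e.g., --variations '|--fp16|--bf16' '|--tf32' expands to:
# ['', '--tf32', '--fp16', '--fp16 --tf32', '--bf16', '--bf16 --tf32']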
lowerCamelCase :Union[str, Any] = max(len(a_) for x in variations)
# split wanted keys
lowerCamelCase :List[str] = args.report_metric_keys.split()
# capture prints into a log file for convenience
lowerCamelCase :Optional[Any] = F"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
print(F"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
print(F"and this script's output is also piped into {report_fn}")
lowerCamelCase :Optional[int] = Tee(a_)
print(F"\n*** Running {len(a_)} benchmarks:")
print(F"Base command: {' '.join(a_)}")
lowerCamelCase :Union[str, Any] = '''variation'''
lowerCamelCase :Optional[int] = []
for id, variation in enumerate(tqdm(a_ , desc='''Total completion: ''' , leave=a_)):
lowerCamelCase :List[Any] = base_cmd + variation.split()
results.append(
process_run(
id + 1 , a_ , a_ , a_ , a_ , args.target_metric_key , a_ , args.repeat_times , a_ , args.verbose , ))
process_results(a_ , args.target_metric_key , a_ , args.base_variation , a_)
if __name__ == "__main__":
main()
| 166
| 0
|
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
a : Union[str, Any] = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def _snake_case ( _snake_case : Optional[Any] , _snake_case : Dict ) -> List[Any]:
'''simple docstring'''
return (abs(source - target ) / target) < 0.01
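# e.g., 100.5 vs. a target of 100 is within 1% (True); 102 vs. 100 is not (False)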
@pytest.mark.integration
def _snake_case ( _snake_case : Tuple ) -> Optional[Any]:
'''simple docstring'''
_A = _TestCommandArgs(dataset=_snake_case , all_configs=_snake_case , save_infos=_snake_case )
_A = TestCommand(*_snake_case )
test_command.run()
_A = os.path.join(_snake_case , 'README.md' )
assert os.path.exists(_snake_case )
_A = DatasetInfosDict.from_directory(_snake_case )
_A = DatasetInfosDict(
{
'default': DatasetInfo(
features=Features(
{
'tokens': Sequence(Value('string' ) ),
'ner_tags': Sequence(
ClassLabel(names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC'] ) ),
'langs': Sequence(Value('string' ) ),
'spans': Sequence(Value('string' ) ),
} ) , splits=[
{
'name': 'train',
'num_bytes': 2_35_15_63,
'num_examples': 1_00_00,
},
{
'name': 'validation',
'num_bytes': 23_84_18,
'num_examples': 10_00,
},
] , download_size=3_94_06_80 , dataset_size=2_58_99_81 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
_A , _A = getattr(dataset_infos['default'] , _snake_case ), getattr(expected_dataset_infos['default'] , _snake_case )
if key == "num_bytes":
assert is_apercent_close(_snake_case , _snake_case )
elif key == "splits":
assert list(_snake_case ) == list(_snake_case )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
assert result == expected
| 720
|
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowercase_ ( AbstractDatasetReader ):
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : NestedDataStructureLike[PathLike] , _UpperCAmelCase : Optional[NamedSplit] = None , _UpperCAmelCase : Optional[Features] = None , _UpperCAmelCase : str = None , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[str] = None , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : str , ):
super().__init__(
_UpperCAmelCase , split=_UpperCAmelCase , features=_UpperCAmelCase , cache_dir=_UpperCAmelCase , keep_in_memory=_UpperCAmelCase , streaming=_UpperCAmelCase , num_proc=_UpperCAmelCase , **_UpperCAmelCase , )
_A = field
_A = path_or_paths if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else {self.split: path_or_paths}
_A = Json(
cache_dir=_UpperCAmelCase , data_files=_UpperCAmelCase , features=_UpperCAmelCase , field=_UpperCAmelCase , **_UpperCAmelCase , )
def lowerCAmelCase_ ( self : Union[str, Any] ):
# Build iterable dataset
if self.streaming:
_A = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
_A = None
_A = None
_A = None
_A = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase , download_mode=_UpperCAmelCase , verification_mode=_UpperCAmelCase , base_path=_UpperCAmelCase , num_proc=self.num_proc , )
_A = self.builder.as_dataset(
split=self.split , verification_mode=_UpperCAmelCase , in_memory=self.keep_in_memory )
return dataset
class lowercase_ :
'''simple docstring'''
def __init__( self : Tuple , _UpperCAmelCase : Dataset , _UpperCAmelCase : Union[PathLike, BinaryIO] , _UpperCAmelCase : Optional[int] = None , _UpperCAmelCase : Optional[int] = None , **_UpperCAmelCase : int , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(F'''num_proc {num_proc} must be an integer > 0.''' )
_A = dataset
_A = path_or_buf
_A = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_A = num_proc
_A = 'utf-8'
_A = to_json_kwargs
def lowerCAmelCase_ ( self : Optional[Any] ):
_A = self.to_json_kwargs.pop('path_or_buf' , _UpperCAmelCase )
_A = self.to_json_kwargs.pop('orient' , 'records' )
_A = self.to_json_kwargs.pop('lines' , True if orient == 'records' else False )
_A = self.to_json_kwargs.pop('index' , False if orient in ['split', 'table'] else True )
_A = self.to_json_kwargs.pop('compression' , _UpperCAmelCase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(F'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , 'wb' , compression=_UpperCAmelCase ) as buffer:
_A = self._write(file_obj=_UpperCAmelCase , orient=_UpperCAmelCase , lines=_UpperCAmelCase , index=_UpperCAmelCase , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
F'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
' was passed. Please provide a local path instead.' )
_A = self._write(
file_obj=self.path_or_buf , orient=_UpperCAmelCase , lines=_UpperCAmelCase , index=_UpperCAmelCase , **self.to_json_kwargs )
return written
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : List[Any] ):
_A , _A , _A , _A , _A = args
_A = query_table(
table=self.dataset.data , key=slice(_UpperCAmelCase , offset + self.batch_size ) , indices=self.dataset._indices , )
_A = batch.to_pandas().to_json(
path_or_buf=_UpperCAmelCase , orient=_UpperCAmelCase , lines=_UpperCAmelCase , index=_UpperCAmelCase , **_UpperCAmelCase )
if not json_str.endswith('\n' ):
json_str += "\n"
return json_str.encode(self.encoding )
def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : BinaryIO , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , **_UpperCAmelCase : Optional[Any] , ):
_A = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
_A = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(_UpperCAmelCase )
else:
_A , _A = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _UpperCAmelCase , _UpperCAmelCase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='ba' , disable=not logging.is_progress_bar_enabled() , desc='Creating json from Arrow format' , ):
written += file_obj.write(_UpperCAmelCase )
return written
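# A minimal usage sketch; upstream in `datasets` the reader/writer classes above
# are `JsonDatasetReader` and `JsonDatasetWriter`, and `ds` below stands in for
# any `Dataset` instance:
#
#   writer = JsonDatasetWriter(ds, "out.jsonl", num_proc=2)
#   bytes_written = writer.write()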
| 505
| 0
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
_lowerCAmelCase: int = logging.get_logger(__name__)
@dataclass
class lowercase_ (BenchmarkArguments ):
snake_case =[
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self , **lowercase_) -> Dict:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
a__ =deprecated_arg[3:]
setattr(self , lowercase_ , not kwargs.pop(lowercase_))
logger.warning(
F"""{deprecated_arg} is depreciated. Please use --no_{positive_arg} or"""
F""" {positive_arg}={kwargs[positive_arg]}""")
a__ =kwargs.pop('torchscript' , self.torchscript)
a__ =kwargs.pop('torch_xla_tpu_print_metrics' , self.torch_xla_tpu_print_metrics)
a__ =kwargs.pop('fp16_opt_level' , self.fpaa_opt_level)
super().__init__(**lowercase_)
snake_case =field(default=lowercase__ , metadata={'help': 'Trace the models using torchscript'} )
snake_case =field(default=lowercase__ , metadata={'help': 'Print Xla/PyTorch tpu metrics'} )
snake_case =field(
default='O1' , metadata={
'help': (
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
'See details at https://nvidia.github.io/apex/amp.html'
)
} , )
@cached_property
def __UpperCamelCase ( self) -> Tuple["torch.device", int]:
requires_backends(self , ['torch'])
logger.info('PyTorch: setting up devices')
if not self.cuda:
a__ =torch.device('cpu')
a__ =0
elif is_torch_tpu_available():
a__ =xm.xla_device()
a__ =0
else:
a__ =torch.device('cuda' if torch.cuda.is_available() else 'cpu')
a__ =torch.cuda.device_count()
return device, n_gpu
@property
def __UpperCamelCase ( self) -> List[Any]:
return is_torch_tpu_available() and self.tpu
@property
def __UpperCamelCase ( self) -> int:
requires_backends(self , ['torch'])
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def __UpperCamelCase ( self) -> "torch.device":
requires_backends(self , ['torch'])
return self._setup_devices[0]
@property
def __UpperCamelCase ( self) -> List[str]:
requires_backends(self , ['torch'])
return self._setup_devices[1]
@property
def __UpperCamelCase ( self) -> List[str]:
return self.n_gpu > 0
| 20
|
"""simple docstring"""
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    '''simple docstring'''
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(na: int, ra: int, nb: int, rb: int) -> int:
    '''simple docstring'''
    (x, y) = extended_euclid(na, nb)
    m = na * nb
    n = rb * x * na + ra * y * nb
    return (n % m + m) % m
def invert_modulo(a: int, n: int) -> int:
    '''simple docstring'''
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(na: int, ra: int, nb: int, rb: int) -> int:
    '''simple docstring'''
    x, y = invert_modulo(na, nb), invert_modulo(nb, na)
    m = na * nb
    n = rb * x * na + ra * y * nb
    return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 510
| 0
|
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class __A ( Pipeline ):
def __init__( self :Optional[Any] , *__snake_case :Tuple , **__snake_case :Optional[Any] ):
'''simple docstring'''
super().__init__(*__snake_case , **__snake_case )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def A__ ( self :List[str] , __snake_case :Any=None ):
'''simple docstring'''
__magic_name__ : Tuple ={}
if top_k is not None:
__magic_name__ : Optional[Any] =top_k
return {}, {}, postprocess_params
def __call__( self :Dict , __snake_case :Union[str, List[str], "Image.Image", List["Image.Image"]] , **__snake_case :Optional[Any] ):
'''simple docstring'''
return super().__call__(__snake_case , **__snake_case )
def A__ ( self :Optional[int] , __snake_case :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =load_image(__snake_case )
__magic_name__ : Tuple =self.image_processor(images=__snake_case , return_tensors=self.framework )
return model_inputs
def A__ ( self :Dict , __snake_case :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : Optional[int] =self.model(**__snake_case )
return model_outputs
def A__ ( self :Tuple , __snake_case :Tuple , __snake_case :List[Any]=5 ):
'''simple docstring'''
if top_k > self.model.config.num_labels:
__magic_name__ : int =self.model.config.num_labels
if self.framework == "pt":
__magic_name__ : Dict =model_outputs.logits.softmax(-1 )[0]
__magic_name__ , __magic_name__ : Tuple =probs.topk(__snake_case )
elif self.framework == "tf":
__magic_name__ : Dict =stable_softmax(model_outputs.logits , axis=-1 )[0]
__magic_name__ : Dict =tf.math.top_k(__snake_case , k=__snake_case )
__magic_name__ , __magic_name__ : Tuple =topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
__magic_name__ : Optional[Any] =scores.tolist()
__magic_name__ : Optional[Any] =ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(__snake_case , __snake_case )]
| 708
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
UpperCAmelCase_ : Dict = {
"google/pix2struct-textcaps-base": (
"https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
),
}
class __A ( PretrainedConfig ):
UpperCamelCase = """pix2struct_text_model"""
UpperCamelCase = ["""past_key_values"""]
UpperCamelCase = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self :List[Any] , __snake_case :Dict=5_02_44 , __snake_case :Dict=7_68 , __snake_case :Union[str, Any]=64 , __snake_case :Union[str, Any]=20_48 , __snake_case :List[Any]=12 , __snake_case :Any=12 , __snake_case :str=32 , __snake_case :Dict=1_28 , __snake_case :Optional[int]=0.1 , __snake_case :Any=1E-6 , __snake_case :Optional[int]=1.0 , __snake_case :List[Any]="gelu_new" , __snake_case :Any=0 , __snake_case :Tuple=False , __snake_case :Any=0 , __snake_case :int=1 , __snake_case :Dict=False , __snake_case :Optional[Any]=True , **__snake_case :Dict , ):
'''simple docstring'''
__magic_name__ : List[str] =vocab_size
__magic_name__ : List[str] =hidden_size
__magic_name__ : Optional[int] =d_kv
__magic_name__ : Dict =d_ff
__magic_name__ : Tuple =num_layers
__magic_name__ : str =num_heads
__magic_name__ : Optional[Any] =relative_attention_num_buckets
__magic_name__ : List[Any] =relative_attention_max_distance
__magic_name__ : str =dropout_rate
__magic_name__ : Optional[Any] =layer_norm_epsilon
__magic_name__ : Union[str, Any] =initializer_factor
__magic_name__ : Tuple =use_cache
__magic_name__ : Any =eos_token_id
__magic_name__ : int =decoder_start_token_id
# for backwards compatibility
__magic_name__ : Union[str, Any] =dense_act_fn
super().__init__(
pad_token_id=__snake_case , eos_token_id=__snake_case , decoder_start_token_id=__snake_case , tie_word_embeddings=__snake_case , is_decoder=__snake_case , **__snake_case , )
@classmethod
def A__ ( cls :Union[str, Any] , __snake_case :Union[str, os.PathLike] , **__snake_case :Any ):
'''simple docstring'''
cls._set_token_in_kwargs(__snake_case )
__magic_name__ , __magic_name__ : Optional[Any] =cls.get_config_dict(__snake_case , **__snake_case )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
__magic_name__ : Tuple =config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__snake_case , **__snake_case )
class __A ( PretrainedConfig ):
UpperCamelCase = """pix2struct_vision_model"""
def __init__( self :Union[str, Any] , __snake_case :Tuple=7_68 , __snake_case :Any=7_68 , __snake_case :Tuple=20_48 , __snake_case :Dict=64 , __snake_case :List[str]=12 , __snake_case :str=12 , __snake_case :str="gelu_new" , __snake_case :Optional[int]=1E-6 , __snake_case :Optional[int]=0.0 , __snake_case :Any=0.0 , __snake_case :Any=1E-10 , __snake_case :List[str]=1.0 , __snake_case :Tuple=40_96 , __snake_case :Optional[int]=32 , __snake_case :Union[str, Any]=1_28 , **__snake_case :Optional[int] , ):
'''simple docstring'''
super().__init__(**__snake_case )
__magic_name__ : Union[str, Any] =hidden_size
__magic_name__ : Dict =patch_embed_hidden_size
__magic_name__ : Tuple =d_ff
__magic_name__ : str =dropout_rate
__magic_name__ : str =num_hidden_layers
__magic_name__ : Dict =num_attention_heads
__magic_name__ : List[str] =initializer_range
__magic_name__ : Optional[int] =initializer_factor
__magic_name__ : str =attention_dropout
__magic_name__ : Union[str, Any] =layer_norm_eps
__magic_name__ : List[str] =dense_act_fn
__magic_name__ : List[str] =seq_len
__magic_name__ : str =relative_attention_num_buckets
__magic_name__ : Optional[int] =relative_attention_max_distance
__magic_name__ : List[str] =d_kv
@classmethod
def A__ ( cls :Optional[Any] , __snake_case :Union[str, os.PathLike] , **__snake_case :Any ):
'''simple docstring'''
cls._set_token_in_kwargs(__snake_case )
__magic_name__ , __magic_name__ : Optional[int] =cls.get_config_dict(__snake_case , **__snake_case )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
__magic_name__ : int =config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(__snake_case , **__snake_case )
class __A ( PretrainedConfig ):
UpperCamelCase = """pix2struct"""
UpperCamelCase = True
def __init__( self :Union[str, Any] , __snake_case :Optional[int]=None , __snake_case :List[Any]=None , __snake_case :Optional[Any]=1.0 , __snake_case :Tuple=0.02 , __snake_case :str=False , __snake_case :List[str]=False , __snake_case :str=True , **__snake_case :List[str] , ):
'''simple docstring'''
super().__init__(tie_word_embeddings=__snake_case , is_encoder_decoder=__snake_case , **__snake_case )
if text_config is None:
__magic_name__ : int ={}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
__magic_name__ : str ={}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
__magic_name__ : Union[str, Any] =PixaStructTextConfig(**__snake_case )
__magic_name__ : str =PixaStructVisionConfig(**__snake_case )
__magic_name__ : int =self.text_config.decoder_start_token_id
__magic_name__ : Optional[Any] =self.text_config.pad_token_id
__magic_name__ : str =self.text_config.eos_token_id
__magic_name__ : int =initializer_factor
__magic_name__ : List[Any] =initializer_range
__magic_name__ : Dict =self.initializer_range
__magic_name__ : Union[str, Any] =self.initializer_range
__magic_name__ : Tuple =is_vqa
@classmethod
def A__ ( cls :Optional[int] , __snake_case :PixaStructTextConfig , __snake_case :PixaStructVisionConfig , **__snake_case :List[str] ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__snake_case )
def A__ ( self :Dict ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =copy.deepcopy(self.__dict__ )
__magic_name__ : Any =self.text_config.to_dict()
__magic_name__ : List[str] =self.vision_config.to_dict()
__magic_name__ : Optional[int] =self.__class__.model_type
return output
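# A minimal composition sketch (class names per the upstream `transformers`
# source; this dump renders all three classes as `__A`):
#
#   text_cfg = Pix2StructTextConfig()
#   vision_cfg = Pix2StructVisionConfig()
#   cfg = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)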
| 367
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[int | float], int | float], x_start: int | float, x_end: int | float, steps: int = 1_00, ) -> float:
    '''simple docstring'''
    xa = x_start
    fxa = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        xb = (x_end - x_start) / steps + xa
        fxb = fnc(xb)
        length += math.hypot(xb - xa, fxb - fxa)
        # Increment step
        xa = xb
        fxa = fxb
    return length
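# Sanity check: for the straight line f(x) = x on [0, 1], every segment has
# slope 1, so line_length(lambda x: x, 0, 1) returns sqrt(2) ~= 1.41421
# (up to float rounding).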
if __name__ == "__main__":
def f(x: int | float) -> float:
    '''simple docstring'''
    return math.sin(10 * x)
print('''f(x) = sin(10 * x)''')
print('''The length of the curve from x = -10 to x = 10 is:''')
i = 10
while i <= 10_0000:
print(F'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 119
|
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n):
    '''simple docstring'''
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(' ', end='')
        for _ in range(0, i + 1):  # printing stars
            print('* ', end='')
        print()
def reverse_floyd(n):
    '''simple docstring'''
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print('* ', end='')
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(' ', end='')
def pretty_print(n):
    '''simple docstring'''
    if n <= 0:
        print('       ...       ....      nothing printing :(' )
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half
if __name__ == "__main__":
print(r'''| /\ | |- | |- |--| |\ /| |-''')
print(r'''|/ \| |- |_ |_ |__| | \/ | |_''')
K = 1
while K:
user_number = int(input('''enter the number and see the magic : '''))
print()
pretty_print(user_number)
K = int(input('''press 0 to exit... and 1 to continue...'''))
print('''Good Bye...''')
| 119
| 1
|
import os
from pathlib import Path
def _lowerCamelCase( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[Any] ) -> List[Any]:
A : Any = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, oder?''',
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
A : List[Any] = {
'''ru-en''': ['''[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)''', '''39.20'''],
'''en-ru''': ['''[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)''', '''33.47'''],
'''en-de''': ['''[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)''', '''42.83'''],
'''de-en''': ['''[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)''', '''41.35'''],
}
A : List[Any] = F'''{src_lang}-{tgt_lang}'''
A : Any = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
A : Optional[int] = os.path.join(UpperCamelCase__ , '''README.md''' )
print(F'''Generating {path}''' )
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(UpperCamelCase__ )
# make sure we are under the root of the project
snake_case_ = Path(__file__).resolve().parent.parent.parent
snake_case_ = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
snake_case_ , snake_case_ , snake_case_ = model_name.split("""-""")
snake_case_ = model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 703
|
'''simple docstring'''
snake_case_ = [
9_99,
8_00,
7_99,
6_00,
5_99,
5_00,
4_00,
3_99,
3_77,
3_55,
3_33,
3_11,
2_88,
2_66,
2_44,
2_22,
2_00,
1_99,
1_77,
1_55,
1_33,
1_11,
88,
66,
44,
22,
0,
]
snake_case_ = [
9_99,
9_76,
9_52,
9_28,
9_05,
8_82,
8_58,
8_57,
8_10,
7_62,
7_15,
7_14,
5_72,
4_29,
4_28,
2_86,
2_85,
2_38,
1_90,
1_43,
1_42,
1_18,
95,
71,
47,
24,
0,
]
snake_case_ = [
9_99,
9_88,
9_77,
9_66,
9_55,
9_44,
9_33,
9_22,
9_11,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_50,
3_00,
2_99,
2_66,
2_33,
2_00,
1_99,
1_79,
1_59,
1_40,
1_20,
1_00,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
snake_case_ = [
9_99,
9_95,
9_92,
9_89,
9_85,
9_81,
9_78,
9_75,
9_71,
9_67,
9_64,
9_61,
9_57,
9_56,
9_51,
9_47,
9_42,
9_37,
9_33,
9_28,
9_23,
9_19,
9_14,
9_13,
9_08,
9_03,
8_97,
8_92,
8_87,
8_81,
8_76,
8_71,
8_70,
8_64,
8_58,
8_52,
8_46,
8_40,
8_34,
8_28,
8_27,
8_20,
8_13,
8_06,
7_99,
7_92,
7_85,
7_84,
7_77,
7_70,
7_63,
7_56,
7_49,
7_42,
7_41,
7_33,
7_24,
7_16,
7_07,
6_99,
6_98,
6_88,
6_77,
6_66,
6_56,
6_55,
6_45,
6_34,
6_23,
6_13,
6_12,
5_98,
5_84,
5_70,
5_69,
5_55,
5_41,
5_27,
5_26,
5_05,
4_84,
4_83,
4_62,
4_40,
4_39,
3_96,
3_95,
3_52,
3_51,
3_08,
3_07,
2_64,
2_63,
2_20,
2_19,
1_76,
1_32,
88,
44,
0,
]
snake_case_ = [
9_99,
9_97,
9_95,
9_92,
9_90,
9_88,
9_86,
9_84,
9_81,
9_79,
9_77,
9_75,
9_72,
9_70,
9_68,
9_66,
9_64,
9_61,
9_59,
9_57,
9_56,
9_54,
9_51,
9_49,
9_46,
9_44,
9_41,
9_39,
9_36,
9_34,
9_31,
9_29,
9_26,
9_24,
9_21,
9_19,
9_16,
9_14,
9_13,
9_10,
9_07,
9_05,
9_02,
8_99,
8_96,
8_93,
8_91,
8_88,
8_85,
8_82,
8_79,
8_77,
8_74,
8_71,
8_70,
8_67,
8_64,
8_61,
8_58,
8_55,
8_52,
8_49,
8_46,
8_43,
8_40,
8_37,
8_34,
8_31,
8_28,
8_27,
8_24,
8_21,
8_17,
8_14,
8_11,
8_08,
8_04,
8_01,
7_98,
7_95,
7_91,
7_88,
7_85,
7_84,
7_80,
7_77,
7_74,
7_70,
7_66,
7_63,
7_60,
7_56,
7_52,
7_49,
7_46,
7_42,
7_41,
7_37,
7_33,
7_30,
7_26,
7_22,
7_18,
7_14,
7_10,
7_07,
7_03,
6_99,
6_98,
6_94,
6_90,
6_85,
6_81,
6_77,
6_73,
6_69,
6_64,
6_60,
6_56,
6_55,
6_50,
6_46,
6_41,
6_36,
6_32,
6_27,
6_22,
6_18,
6_13,
6_12,
6_07,
6_02,
5_96,
5_91,
5_86,
5_80,
5_75,
5_70,
5_69,
5_63,
5_57,
5_51,
5_45,
5_39,
5_33,
5_27,
5_26,
5_19,
5_12,
5_05,
4_98,
4_91,
4_84,
4_83,
4_74,
4_66,
4_57,
4_49,
4_40,
4_39,
4_28,
4_18,
4_07,
3_96,
3_95,
3_81,
3_66,
3_52,
3_51,
3_30,
3_08,
3_07,
2_86,
2_64,
2_63,
2_42,
2_20,
2_19,
1_76,
1_75,
1_32,
1_31,
88,
44,
0,
]
snake_case_ = [
9_99,
9_91,
9_82,
9_74,
9_66,
9_58,
9_50,
9_41,
9_33,
9_25,
9_16,
9_08,
9_00,
8_99,
8_74,
8_50,
8_25,
8_00,
7_99,
7_00,
6_00,
5_00,
4_00,
3_00,
2_00,
1_00,
0,
]
snake_case_ = [
9_99,
9_92,
9_85,
9_78,
9_71,
9_64,
9_57,
9_49,
9_42,
9_35,
9_28,
9_21,
9_14,
9_07,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_00,
2_99,
2_00,
1_99,
1_00,
99,
0,
]
snake_case_ = [
9_99,
9_96,
9_92,
9_89,
9_85,
9_82,
9_79,
9_75,
9_72,
9_68,
9_65,
9_61,
9_58,
9_55,
9_51,
9_48,
9_44,
9_41,
9_38,
9_34,
9_31,
9_27,
9_24,
9_20,
9_17,
9_14,
9_10,
9_07,
9_03,
9_00,
8_99,
8_91,
8_84,
8_76,
8_69,
8_61,
8_53,
8_46,
8_38,
8_30,
8_23,
8_15,
8_08,
8_00,
7_99,
7_88,
7_77,
7_66,
7_55,
7_44,
7_33,
7_22,
7_11,
7_00,
6_99,
6_88,
6_77,
6_66,
6_55,
6_44,
6_33,
6_22,
6_11,
6_00,
5_99,
5_85,
5_71,
5_57,
5_42,
5_28,
5_14,
5_00,
4_99,
4_85,
4_71,
4_57,
4_42,
4_28,
4_14,
4_00,
3_99,
3_79,
3_59,
3_40,
3_20,
3_00,
2_99,
2_79,
2_59,
2_40,
2_20,
2_00,
1_99,
1_66,
1_33,
1_00,
99,
66,
33,
0,
]
| 537
| 0
|
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def lowerCAmelCase_ () -> List[Any]:
'''simple docstring'''
lowerCAmelCase__ = torch.nn.Linear(2 , 4 )
lowerCAmelCase__ = torch.optim.AdamW(model.parameters() , lr=1.0 )
lowerCAmelCase__ = torch.optim.lr_scheduler.OneCycleLR(lowercase__ , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
lowerCAmelCase__ = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
lowerCAmelCase__ = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def lowerCAmelCase_ (lowercase__ : Dict ) -> Tuple:
'''simple docstring'''
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def lowerCAmelCase_ (lowercase__ : int ) -> int:
'''simple docstring'''
lowerCAmelCase__ = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(lowercase__ )
class lowerCAmelCase_ ( AccelerateTestCase ):
@require_cuda
def __snake_case ( self : Optional[Any] ):
lowerCAmelCase__ = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ = Accelerator(cpu=__UpperCAmelCase )
def __snake_case ( self : Dict ):
lowerCAmelCase__ = Accelerator()
lowerCAmelCase__ = GradientState()
assert state.num_steps == 1
lowerCAmelCase__ = 4
assert state.num_steps == 4
assert state.sync_gradients is True
lowerCAmelCase__ = False
assert state.sync_gradients is False
GradientState._reset_state()
def __snake_case ( self : Dict ):
lowerCAmelCase__ = Accelerator()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = create_components()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) = accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def __snake_case ( self : Tuple ):
lowerCAmelCase__ = Accelerator()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = create_components()
accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def __snake_case ( self : Optional[int] ):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*SCREAMING_SNAKE_CASE_ : Tuple , **SCREAMING_SNAKE_CASE_ : List[Any] ):
pass
with patch('''torch.cuda.set_device''' , __UpperCAmelCase ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ):
lowerCAmelCase__ = Accelerator()
self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' )
def __snake_case ( self : Dict ):
lowerCAmelCase__ = Accelerator()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = create_components()
accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = get_signature(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__UpperCAmelCase )
# make sure random weights don't match
load_random_weights(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) < 1e-3 )
def __snake_case ( self : int ):
lowerCAmelCase__ = Accelerator()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = create_components()
accelerator.prepare(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
lowerCAmelCase__ = get_signature(__UpperCAmelCase )
# saving hook
def save_config(SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] ):
lowerCAmelCase__ = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(__UpperCAmelCase , '''data.json''' ) , '''w''' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
# loading hook
def load_config(SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str ):
with open(os.path.join(__UpperCAmelCase , '''data.json''' ) , '''r''' ) as f:
lowerCAmelCase__ = json.load(__UpperCAmelCase )
lowerCAmelCase__ = config['''class_name''']
lowerCAmelCase__ = accelerator.register_save_state_pre_hook(__UpperCAmelCase )
lowerCAmelCase__ = accelerator.register_load_state_pre_hook(__UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__UpperCAmelCase )
# make sure random weights don't match with hooks
load_random_weights(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) > 1e-3 )
# random class name to verify correct one is loaded
lowerCAmelCase__ = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) < 1e-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__UpperCAmelCase )
# make sure random weights don't match with hooks removed
load_random_weights(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) > 1e-3 )
# random class name to verify correct one is loaded
lowerCAmelCase__ = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(__UpperCAmelCase )
self.assertTrue(abs(model_signature - get_signature(__UpperCAmelCase ) ) < 1e-3 )
# mode.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def __snake_case ( self : Union[str, Any] ):
lowerCAmelCase__ = Accelerator()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = create_components()
lowerCAmelCase__ = None
# This should work
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.assertTrue(dummy_obj is None )
def __snake_case ( self : Dict ):
lowerCAmelCase__ = Accelerator()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = create_components()
lowerCAmelCase__ = [1, 2, 3]
# This should work
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = accelerator.prepare(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
self.assertEqual(
getattr(__UpperCAmelCase , '''_is_accelerate_prepared''' , __UpperCAmelCase ) , __UpperCAmelCase , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
self.assertEqual(
getattr(__UpperCAmelCase , '''_is_accelerate_prepared''' , __UpperCAmelCase ) , __UpperCAmelCase , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(__UpperCAmelCase , '''_is_accelerate_prepared''' , __UpperCAmelCase ) , __UpperCAmelCase , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(__UpperCAmelCase , '''_is_accelerate_prepared''' , __UpperCAmelCase ) , __UpperCAmelCase , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(__UpperCAmelCase , '''_is_accelerate_prepared''' , __UpperCAmelCase ) , __UpperCAmelCase , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(__UpperCAmelCase , '''_is_accelerate_prepared''' , __UpperCAmelCase ) , __UpperCAmelCase , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
@slow
@require_bnb
def __snake_case ( self : Any ):
from transformers import AutoModelForCausalLM
lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=__UpperCAmelCase , device_map={'''''': 0} , )
lowerCAmelCase__ = Accelerator()
# This should work
lowerCAmelCase__ = accelerator.prepare(__UpperCAmelCase )
@slow
@require_bnb
def __snake_case ( self : str ):
from transformers import AutoModelForCausalLM
lowerCAmelCase__ = Accelerator()
with init_empty_weights():
lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
lowerCAmelCase__ = infer_auto_device_map(__UpperCAmelCase )
lowerCAmelCase__ = '''cpu'''
lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , device_map=__UpperCAmelCase , load_in_abit=__UpperCAmelCase , llm_inta_enable_fpaa_cpu_offload=__UpperCAmelCase )
# This should not work and get value error
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ = accelerator.prepare(__UpperCAmelCase )
@slow
@require_bnb
@require_multi_gpu
def __snake_case ( self : int ):
from transformers import AutoModelForCausalLM
lowerCAmelCase__ = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
lowerCAmelCase__ = infer_auto_device_map(__UpperCAmelCase )
lowerCAmelCase__ = 1
lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=__UpperCAmelCase , device_map=__UpperCAmelCase , )
lowerCAmelCase__ = Accelerator()
# This should not work and get value error
with self.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ = accelerator.prepare(__UpperCAmelCase )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def __snake_case ( self : int ):
from transformers import AutoModelForCausalLM
with init_empty_weights():
lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
lowerCAmelCase__ = infer_auto_device_map(__UpperCAmelCase )
lowerCAmelCase__ = 1
lowerCAmelCase__ = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=__UpperCAmelCase , device_map=__UpperCAmelCase , )
lowerCAmelCase__ = Accelerator()
# This should work
lowerCAmelCase__ = accelerator.prepare(__UpperCAmelCase )
@require_cuda
def __snake_case ( self : str ):
lowerCAmelCase__ = torch.nn.Linear(10 , 10 )
lowerCAmelCase__ = torch.optim.SGD(model.parameters() , lr=0.01 )
lowerCAmelCase__ = Accelerator(cpu=__UpperCAmelCase )
lowerCAmelCase__ = accelerator.prepare(__UpperCAmelCase )
| 668
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class lowercase_ :
'''simple docstring'''
def __init__( self : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : bool = True , __UpperCAmelCase : bool = False ) ->Tuple:
"""simple docstring"""
a = scheduler
a = optimizers if isinstance(__UpperCAmelCase , (list, tuple) ) else [optimizers]
a = split_batches
a = step_with_optimizer
a = GradientState()
def __lowerCAmelCase ( self : Tuple , *__UpperCAmelCase : List[Any] , **__UpperCAmelCase : Optional[Any] ) ->int:
"""simple docstring"""
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*__UpperCAmelCase , **__UpperCAmelCase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*__UpperCAmelCase , **__UpperCAmelCase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
a = AcceleratorState().num_processes
for _ in range(__UpperCAmelCase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , '''total_steps''' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*__UpperCAmelCase , **__UpperCAmelCase )
else:
self.scheduler.step(*__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCAmelCase ( self : Optional[int] ) ->List[str]:
"""simple docstring"""
return self.scheduler.get_last_lr()
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
return self.scheduler.state_dict()
def __lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : Any ) ->Tuple:
"""simple docstring"""
self.scheduler.load_state_dict(__UpperCAmelCase )
def __lowerCAmelCase ( self : Dict ) ->Optional[int]:
"""simple docstring"""
return self.scheduler.get_lr()
def __lowerCAmelCase ( self : int , *__UpperCAmelCase : List[str] , **__UpperCAmelCase : Optional[Any] ) ->List[str]:
"""simple docstring"""
return self.scheduler.print_lr(*__UpperCAmelCase , **__UpperCAmelCase )
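# A minimal usage sketch (upstream this wrapper is `accelerate`'s
# `AcceleratedScheduler`; `Accelerator.prepare` normally builds it for you):
#
#   wrapped = AcceleratedScheduler(torch_scheduler, optimizers=optimizer)
#   wrapped.step()  # steps only after the wrapped optimizer has actually stepped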
| 117
| 0
|
from __future__ import annotations
seive = [True] * 1_00_00_01
i = 2
while i * i <= 1_00_00_00:
    if seive[i]:
        for j in range(i * i, 1_00_00_01, i):
            seive[j] = False
    i += 1
def is_prime(n: int) -> bool:
    return seive[n]
def contains_an_even_digit(n: int) -> bool:
    return any(digit in '02468' for digit in str(n))
def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result
def solution() -> int:
    return len(find_circular_primes())
if __name__ == "__main__":
print(F'''{len(find_circular_primes()) = }''')
| 703
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase__( FeatureExtractionSavingTestMixin ):
'''simple docstring'''
A : List[Any] = None
A : Optional[int] = None
@property
def UpperCAmelCase ( self : str) -> Union[str, Any]:
"""simple docstring"""
return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase ( self : int) -> Any:
"""simple docstring"""
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(lowerCAmelCase , 'feature_size'))
self.assertTrue(hasattr(lowerCAmelCase , 'sampling_rate'))
self.assertTrue(hasattr(lowerCAmelCase , 'padding_value'))
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common()
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs})
self.assertTrue(all(len(lowerCAmelCase) == len(lowerCAmelCase) for x, y in zip(lowerCAmelCase , processed_features[input_name])))
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='np')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_torch
def UpperCAmelCase ( self : Dict) -> int:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='pt')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
@require_tf
def UpperCAmelCase ( self : Optional[Any]) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCAmelCase)
lowercase__ = self.feature_extraction_class(**self.feat_extract_dict)
lowercase__ = feat_extract.model_input_names[0]
lowercase__ = BatchFeature({input_name: speech_inputs} , tensor_type='tf')
lowercase__ = processed_features[input_name]
if len(batch_features_input.shape) < 3:
lowercase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
def UpperCAmelCase ( self : str , lowerCAmelCase : str=False) -> Union[str, Any]:
"""simple docstring"""
def _inputs_have_equal_length(lowerCAmelCase : int):
lowercase__ = len(input[0])
for input_slice in input[1:]:
if len(lowerCAmelCase) != length:
return False
return True
def _inputs_are_equal(lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple):
if len(lowerCAmelCase) != len(lowerCAmelCase):
return False
for input_slice_a, input_slice_a in zip(lowerCAmelCase , lowerCAmelCase):
if not np.allclose(np.asarray(lowerCAmelCase) , np.asarray(lowerCAmelCase) , atol=1E-3):
return False
return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size
        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]
        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]
        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]
        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]
        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]
        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np")
        input_5 = input_5[input_name]
        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(_inputs_are_equal(input_2, input_3))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))
        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)
        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]
        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]
        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length)
        input_8 = input_8[input_name]
        input_9 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length, return_tensors="np", )
        input_9 = input_9[input_name]
        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))
        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))
        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)
        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3)
        self.assertTrue(
            abs(
                np.asarray(input_2[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
            < 1e-3)
        self.assertTrue(
            abs(
                np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
            < 1e-3)
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3)
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3)
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True)
        input_1 = input_1[input_name]
        input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]
        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))
        # truncate to smallest with np
        input_3 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np", truncation=True, )
        input_3 = input_3[input_name]
        input_4 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np")
        input_4 = input_4[input_name]
        self.assertTrue(_inputs_have_equal_length(input_3))
        self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_4))
        # truncate to middle
        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True, return_tensors="np", )
        input_5 = input_5[input_name]
        input_6 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True)
        input_6 = input_6[input_name]
        input_7 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np")
        input_7 = input_7[input_name]
        self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_have_equal_length(input_6))
        self.assertTrue(_inputs_are_equal(input_5, input_6))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_7))
        self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]
        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]
        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_8 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), pad_to_multiple_of=pad_to_multiple_of, truncation=True, )
        input_8 = input_8[input_name]
        input_9 = feat_extract.pad(
            processed_features, padding="max_length", max_length=len(speech_inputs[0]), pad_to_multiple_of=pad_to_multiple_of, )
        input_9 = input_9[input_name]
        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of
        self.assertTrue(len(input_8[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_8))
        self.assertFalse(_inputs_have_equal_length(input_9))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
@require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)
@require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]
        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)
    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)
        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np")
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length])
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs])
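# --- Illustrative sketch (added for clarity; not part of the original test
# file): the `expected_mult_pad_length` computation used above is ordinary
# ceiling division; `-(-n // m) * m` is the usual one-line idiom for it.
def round_up_to_multiple(length: int, multiple: int) -> int:
    """Smallest multiple of `multiple` that is >= `length`."""
    return -(-length // multiple) * multiple


assert round_up_to_multiple(23, 10) == 30
assert round_up_to_multiple(30, 10) == 30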
| 642
| 0
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'junnyu/roformer_chinese_small': 1_536,
'junnyu/roformer_chinese_base': 1_536,
'junnyu/roformer_chinese_char_small': 512,
'junnyu/roformer_chinese_char_base': 512,
'junnyu/roformer_small_discriminator': 128,
'junnyu/roformer_small_generator': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" RoFormer tokenizer backed by HuggingFace's `tokenizers` library, with jieba pre-tokenization."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type") )
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
        self.do_lower_case = do_lower_case
    def __getstate__(self):
        state = self.__dict__.copy()
        # the custom jieba pre-tokenizer cannot be pickled; fall back to BertPreTokenizer
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token type ids for a pair: `[CLS] A [SEP]` gets 0s, `B [SEP]` gets 1s."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained( self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs, ):
        # the custom jieba pre-tokenizer cannot be serialized to tokenizer.json;
        # temporarily swap in the standard BertPreTokenizer while saving
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
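# --- Illustrative usage sketch (added; not part of the original module). The
# checkpoint name comes from PRETRAINED_VOCAB_FILES_MAP above; the exact token
# split shown is an assumption about jieba's segmentation:
# from transformers import RoFormerTokenizerFast
# tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
# tokenizer.tokenize("今天天气非常好。")  # word-level pieces such as ['今天', '天气', '非常', '好', '。']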
| 82
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_git': ['GIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GitConfig', 'GitVisionConfig'],
'processing_git': ['GitProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_git'] = [
'GIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GitForCausalLM',
'GitModel',
'GitPreTrainedModel',
'GitVisionModel',
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
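# --- Illustrative note (added; an assumption about typical usage, not from
# this file): with the lazy structure above, importing the package stays cheap
# because torch-backed symbols are only materialized on first attribute access:
# from transformers.models.git import GitConfig        # config only, no torch needed
# from transformers.models.git import GitForCausalLM   # first access triggers modeling_git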
| 614
| 0
|
"""Generate all permutations of a sequence with a backtracking depth-first search."""
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str], current_sequence: list[int | str], index: int, index_used: list[int],
) -> None:
    """
    Creates a state space tree and iterates through each branch using DFS.
    A leaf (one complete permutation) is reached once every element of
    `sequence` has been used exactly once.
    """
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
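# --- Optional sanity check (added; not in the original script): a sequence of
# length n has n! permutations, so [3, 1, 2, 4] should print 4! = 24 lines,
# matching what the standard library enumerates.
from itertools import permutations

assert len(list(permutations([3, 1, 2, 4]))) == 24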
| 417
|
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler")
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", scheduler=lms_scheduler, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
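# --- Illustrative sketch (added; not part of the original test file): the x4
# upscaler multiplies the spatial dimensions by 4, which is exactly what the
# shape assertions above encode (128 x 128 in, 512 x 512 out).
def upscaled_shape(batch: int, height: int, width: int, channels: int = 3, factor: int = 4) -> tuple:
    """NHWC output shape of an image upscaled by `factor` along H and W."""
    return (batch, height * factor, width * factor, channels)


assert upscaled_shape(1, 128, 128) == (1, 512, 512, 3)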
| 417
| 1
|
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
    """Consistency checks on the parsed arguments before any heavy work starts."""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False


def main():
    """Parse arguments, build the student/teacher pair, and launch distillation."""
    parser = argparse.ArgumentParser(description="Training")
parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists." )
parser.add_argument(
"--dump_path", type=UpperCAmelCase_, required=UpperCAmelCase_, help="The output directory (log, checkpoints, parameters, etc.)" )
parser.add_argument(
"--data_file", type=UpperCAmelCase_, required=UpperCAmelCase_, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.", )
parser.add_argument(
"--student_type", type=UpperCAmelCase_, choices=["distilbert", "roberta", "gpt2"], required=UpperCAmelCase_, help="The student type (DistilBERT, RoBERTa).", )
parser.add_argument("--student_config", type=UpperCAmelCase_, required=UpperCAmelCase_, help="Path to the student configuration." )
parser.add_argument(
"--student_pretrained_weights", default=UpperCAmelCase_, type=UpperCAmelCase_, help="Load student initialization checkpoint." )
parser.add_argument(
"--teacher_type", choices=["bert", "roberta", "gpt2"], required=UpperCAmelCase_, help="Teacher type (BERT, RoBERTa)." )
parser.add_argument("--teacher_name", type=UpperCAmelCase_, required=UpperCAmelCase_, help="The teacher model." )
parser.add_argument("--temperature", default=2.0, type=UpperCAmelCase_, help="Temperature for the softmax temperature." )
parser.add_argument(
"--alpha_ce", default=0.5, type=UpperCAmelCase_, help="Linear weight for the distillation loss. Must be >=0." )
parser.add_argument(
"--alpha_mlm", default=0.0, type=UpperCAmelCase_, help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.", )
parser.add_argument("--alpha_clm", default=0.5, type=UpperCAmelCase_, help="Linear weight for the CLM loss. Must be >=0." )
parser.add_argument("--alpha_mse", default=0.0, type=UpperCAmelCase_, help="Linear weight of the MSE loss. Must be >=0." )
parser.add_argument(
"--alpha_cos", default=0.0, type=UpperCAmelCase_, help="Linear weight of the cosine embedding loss. Must be >=0." )
parser.add_argument(
"--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." )
parser.add_argument(
"--mlm_mask_prop", default=0.15, type=UpperCAmelCase_, help="Proportion of tokens for which we need to make a prediction.", )
parser.add_argument("--word_mask", default=0.8, type=UpperCAmelCase_, help="Proportion of tokens to mask out." )
parser.add_argument("--word_keep", default=0.1, type=UpperCAmelCase_, help="Proportion of tokens to keep." )
parser.add_argument("--word_rand", default=0.1, type=UpperCAmelCase_, help="Proportion of tokens to randomly replace." )
parser.add_argument(
"--mlm_smoothing", default=0.7, type=UpperCAmelCase_, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).", )
parser.add_argument("--token_counts", type=UpperCAmelCase_, help="The token counts in the data_file for MLM." )
parser.add_argument(
"--restrict_ce_to_mask", action="store_true", help="If true, compute the distillation loss only the [MLM] prediction distribution.", )
parser.add_argument(
"--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.", )
parser.add_argument(
"--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.", )
parser.add_argument("--n_epoch", type=UpperCAmelCase_, default=3, help="Number of pass on the whole dataset." )
parser.add_argument("--batch_size", type=UpperCAmelCase_, default=5, help="Batch size (for each process)." )
parser.add_argument(
"--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. Default is true.", )
parser.add_argument(
"--gradient_accumulation_steps", type=UpperCAmelCase_, default=50, help="Gradient accumulation for larger training batches.", )
parser.add_argument("--warmup_prop", default=0.05, type=UpperCAmelCase_, help="Linear warmup proportion." )
parser.add_argument("--weight_decay", default=0.0, type=UpperCAmelCase_, help="Weight decay if we apply some." )
parser.add_argument("--learning_rate", default=5e-4, type=UpperCAmelCase_, help="The initial learning rate for Adam." )
parser.add_argument("--adam_epsilon", default=1e-6, type=UpperCAmelCase_, help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm", default=5.0, type=UpperCAmelCase_, help="Max gradient norm." )
parser.add_argument("--initializer_range", default=0.02, type=UpperCAmelCase_, help="Random initialization range." )
parser.add_argument(
"--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", )
parser.add_argument(
"--fp16_opt_level", type=UpperCAmelCase_, default="O1", help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
), )
parser.add_argument("--n_gpu", type=UpperCAmelCase_, default=1, help="Number of GPUs in the node." )
parser.add_argument("--local_rank", type=UpperCAmelCase_, default=-1, help="Distributed training - Local rank" )
parser.add_argument("--seed", type=UpperCAmelCase_, default=56, help="Random seed" )
parser.add_argument("--log_interval", type=UpperCAmelCase_, default=500, help="Tensorboard logging interval." )
parser.add_argument("--checkpoint_interval", type=UpperCAmelCase_, default=4000, help="Checkpoint interval." )
A__ = parser.parse_args()
sanity_checks(UpperCAmelCase_ )
    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"""Serialization dir {args.dump_path} already exists, but you have not specified whether to"""
                    " overwrite it. Use `--force` if you want to overwrite it.")
            else:
                shutil.rmtree(args.dump_path)
        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"""Experiment will be dumped and logged in {args.dump_path}""")
        # SAVE PARAMS #
        logger.info(f"""Param: {args}""")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"""Special tokens {special_tok_ids}""")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
    # DATA LOADER #
    logger.info(f"""Loading data from {args.data_file}""")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)
    if args.mlm:
        logger.info(f"""Loading token counts from {args.token_counts} (already pre-computed)""")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")
    # STUDENT #
    logger.info(f"""Loading student config from {args.student_config}""")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(f"""Loading pretrained weights from {args.student_pretrained_weights}""")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)
    if args.n_gpu > 0:
        student.to(f"""cuda:{args.local_rank}""")
    logger.info("Student loaded.")
    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"""cuda:{args.local_rank}""")
    logger.info(f"""Teacher loaded from {args.teacher_name}.""")
    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size
    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher)
distiller.train()
logger.info("Let's go get some drinks." )
if __name__ == "__main__":
main()
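# --- Illustrative invocation (added; paths and checkpoint names are
# hypothetical placeholders, not taken from this file):
# python train.py --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#     --teacher_type bert --teacher_name bert-base-uncased \
#     --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_cos 1.0 \
#     --freeze_pos_embs --dump_path serialization_dir/my_first_training \
#     --data_file data/binarized_text.bert-base-uncased.pickle \
#     --token_counts data/token_counts.bert-base-uncased.pickle --force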
| 104
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type") )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs.")
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs.")
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds conversation input ids, DialoGPT-style: each utterance followed by EOS."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
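# --- Illustrative usage sketch (added; not part of the original module). The
# checkpoint name comes from PRETRAINED_VOCAB_FILES_MAP above; there is no slow
# tokenizer counterpart (slow_tokenizer_class is None), so the fast class is
# the only option:
# from transformers import BloomTokenizerFast
# tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
# tok("Hello world")["input_ids"]  # byte-level BPE ids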
| 162
| 0
|
"""simple docstring"""
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
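# --- Worked example (added sketch; the 500 MiB cap is an assumption): with an
# in-memory limit of 500 MiB, a 400 MiB dataset counts as "small" (kept in
# memory), a 600 MiB dataset does not, and an unknown size is never small.
# is_small_dataset(400 * 2**20)  -> True,  if config.IN_MEMORY_MAX_SIZE == 500 * 2**20
# is_small_dataset(600 * 2**20)  -> False
# is_small_dataset(None)         -> False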
| 701
|
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space: list, states_space: list, initial_probabilities: dict, transition_probabilities: dict, emission_probabilities: dict, ) -> list:
    """Returns the most likely sequence of hidden states for the observations."""
    _validation(
        observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(
    observations_space: Any, states_space: Any, initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any, ) -> None:
    _validate_not_empty(
        observations_space, states_space, initial_probabilities, transition_probabilities, emission_probabilities, )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: Any, states_space: Any, initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any, ) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any, transition_probabilities: Any, emission_probabilities: Any, ) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: Any, var_name: str, value_type: type, nested: bool = False) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)
if __name__ == "__main__":
from doctest import testmod
testmod()
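# --- Worked example (added; the classic healthy/fever HMM from the Viterbi
# literature, numbers are illustrative assumptions, not from this file):
#   observations = ["normal", "cold", "dizzy"]
#   states = ["healthy", "fever"]
#   start_p = {"healthy": 0.6, "fever": 0.4}
#   trans_p = {"healthy": {"healthy": 0.7, "fever": 0.3},
#              "fever": {"healthy": 0.4, "fever": 0.6}}
#   emit_p = {"healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
#             "fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6}}
# viterbi(observations, states, start_p, trans_p, emit_p)
# should return ['healthy', 'healthy', 'fever'].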
| 397
| 0
|
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))
        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 343
|
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop('audio', None)
        text = kwargs.pop('text', None)
        text_target = kwargs.pop('text_target', None)
        audio_target = kwargs.pop('audio_target', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        if audio is not None and text is not None:
            raise ValueError(
                'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?')
        if audio_target is not None and text_target is not None:
            raise ValueError(
                'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?')
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None
        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets['input_values']
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets['input_ids']
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs['labels'] = labels
            decoder_attention_mask = targets.get('attention_mask')
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask
        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop('input_values', None)
        input_ids = kwargs.pop('input_ids', None)
        labels = kwargs.pop('labels', None)
        if input_values is not None and input_ids is not None:
            raise ValueError('Cannot process both `input_values` and `input_ids` inputs.')
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.')
        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None
        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets['input_ids']
            else:
                # temporarily widen feature_size so spectrogram targets pad correctly
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets['input_values']
        else:
            targets = None
        if inputs is None:
            return targets
        if targets is not None:
            inputs['labels'] = labels
            decoder_attention_mask = targets.get('attention_mask')
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask
        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
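# --- Illustrative usage sketch (added; checkpoint name and variables are
# assumptions, not from this file):
# processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_asr")
# batch = processor(audio=waveform, sampling_rate=16000, text_target="a transcript")
# batch now holds `input_values` (audio features), `labels` (tokenized target
# ids), and, if present, `decoder_attention_mask`.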
| 343
| 1
|
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
BLIP_TEST_FILE = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {"""BertModelTest""": """BertModelTester"""}
        EXPECTED_BLIP_MAPPING = {
            """BlipModelTest""": """BlipModelTester""",
            """BlipTextImageModelTest""": """BlipTextImageModelsModelTester""",
            """BlipTextModelTest""": """BlipTextModelTester""",
            """BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""",
            """BlipVQAModelTest""": """BlipVQAModelTester""",
            """BlipVisionModelTest""": """BlipVisionModelTester""",
        }
        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
            """BertForMaskedLM""": ["""BertModelTest"""],
            """BertForMultipleChoice""": ["""BertModelTest"""],
            """BertForNextSentencePrediction""": ["""BertModelTest"""],
            """BertForPreTraining""": ["""BertModelTest"""],
            """BertForQuestionAnswering""": ["""BertModelTest"""],
            """BertForSequenceClassification""": ["""BertModelTest"""],
            """BertForTokenClassification""": ["""BertModelTest"""],
            """BertLMHeadModel""": ["""BertModelTest"""],
            """BertModel""": ["""BertModelTest"""],
        }
        EXPECTED_BLIP_MAPPING = {
            """BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""],
            """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""],
            """BlipForQuestionAnswering""": ["""BlipVQAModelTest"""],
            """BlipModel""": ["""BlipModelTest"""],
            """BlipTextModel""": ["""BlipTextModelTest"""],
            """BlipVisionModel""": ["""BlipVisionModelTest"""],
        }
        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)
        EXPECTED_BERT_MAPPING = {
            """BertForMaskedLM""": ["""BertModelTester"""],
            """BertForMultipleChoice""": ["""BertModelTester"""],
            """BertForNextSentencePrediction""": ["""BertModelTester"""],
            """BertForPreTraining""": ["""BertModelTester"""],
            """BertForQuestionAnswering""": ["""BertModelTester"""],
            """BertForSequenceClassification""": ["""BertModelTester"""],
            """BertForTokenClassification""": ["""BertModelTester"""],
            """BertLMHeadModel""": ["""BertModelTester"""],
            """BertModel""": ["""BertModelTester"""],
        }
        EXPECTED_BLIP_MAPPING = {
            """BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""],
            """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTester"""],
            """BlipForQuestionAnswering""": ["""BlipVQAModelTester"""],
            """BlipModel""": ["""BlipModelTester"""],
            """BlipTextModel""": ["""BlipTextModelTester"""],
            """BlipVisionModel""": ["""BlipVisionModelTester"""],
        }
        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 256
|
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = 'CompVis/stable-diffusion-v1-1'
pipe2_model_id = 'CompVis/stable-diffusion-v1-2'
pipe3_model_id = 'CompVis/stable-diffusion-v1-3'
pipe4_model_id = 'CompVis/stable-diffusion-v1-4'
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__( self, vae, text_encoder, tokenizer, unet, scheduler, safety_checker, feature_extractor, requires_safety_checker=True, ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker, )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
@property
    def components(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("""_""")}

    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
def __lowerCAmelCase ( self , __A , __A = 512 , __A = 512 , __A = 50 , __A = 7.5 , __A = None , __A = 1 , __A = 0.0 , __A = None , __A = None , __A = "pil" , __A = True , __A = None , __A = 1 , **__A , ) -> Dict:
return self.pipea(
prompt=__A , height=__A , width=__A , num_inference_steps=__A , guidance_scale=__A , negative_prompt=__A , num_images_per_prompt=__A , eta=__A , generator=__A , latents=__A , output_type=__A , return_dict=__A , callback=__A , callback_steps=__A , **__A , )
@torch.no_grad()
def __lowerCAmelCase ( self , __A , __A = 512 , __A = 512 , __A = 50 , __A = 7.5 , __A = None , __A = 1 , __A = 0.0 , __A = None , __A = None , __A = "pil" , __A = True , __A = None , __A = 1 , **__A , ) -> Dict:
return self.pipea(
prompt=__A , height=__A , width=__A , num_inference_steps=__A , guidance_scale=__A , negative_prompt=__A , num_images_per_prompt=__A , eta=__A , generator=__A , latents=__A , output_type=__A , return_dict=__A , callback=__A , callback_steps=__A , **__A , )
@torch.no_grad()
def __lowerCAmelCase ( self , __A , __A = 512 , __A = 512 , __A = 50 , __A = 7.5 , __A = None , __A = 1 , __A = 0.0 , __A = None , __A = None , __A = "pil" , __A = True , __A = None , __A = 1 , **__A , ) -> str:
return self.pipea(
prompt=__A , height=__A , width=__A , num_inference_steps=__A , guidance_scale=__A , negative_prompt=__A , num_images_per_prompt=__A , eta=__A , generator=__A , latents=__A , output_type=__A , return_dict=__A , callback=__A , callback_steps=__A , **__A , )
@torch.no_grad()
def __lowerCAmelCase ( self , __A , __A = 512 , __A = 512 , __A = 50 , __A = 7.5 , __A = None , __A = 1 , __A = 0.0 , __A = None , __A = None , __A = "pil" , __A = True , __A = None , __A = 1 , **__A , ) -> Any:
return self.pipea(
prompt=__A , height=__A , width=__A , num_inference_steps=__A , guidance_scale=__A , negative_prompt=__A , num_images_per_prompt=__A , eta=__A , generator=__A , latents=__A , output_type=__A , return_dict=__A , callback=__A , callback_steps=__A , **__A , )
@torch.no_grad()
def __lowerCAmelCase ( self , __A , __A = 512 , __A = 512 , __A = 50 , __A = 7.5 , __A = None , __A = 1 , __A = 0.0 , __A = None , __A = None , __A = "pil" , __A = True , __A = None , __A = 1 , **__A , ) -> List[Any]:
lowerCAmelCase_ :List[Any] = """cuda""" if torch.cuda.is_available() else """cpu"""
self.to(__A )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
lowerCAmelCase_ :Union[str, Any] = self.textaimg_sda_a(
prompt=__A , height=__A , width=__A , num_inference_steps=__A , guidance_scale=__A , negative_prompt=__A , num_images_per_prompt=__A , eta=__A , generator=__A , latents=__A , output_type=__A , return_dict=__A , callback=__A , callback_steps=__A , **__A , )
# Get first result from Stable Diffusion Checkpoint v1.2
lowerCAmelCase_ :Any = self.textaimg_sda_a(
prompt=__A , height=__A , width=__A , num_inference_steps=__A , guidance_scale=__A , negative_prompt=__A , num_images_per_prompt=__A , eta=__A , generator=__A , latents=__A , output_type=__A , return_dict=__A , callback=__A , callback_steps=__A , **__A , )
# Get first result from Stable Diffusion Checkpoint v1.3
lowerCAmelCase_ :Dict = self.textaimg_sda_a(
prompt=__A , height=__A , width=__A , num_inference_steps=__A , guidance_scale=__A , negative_prompt=__A , num_images_per_prompt=__A , eta=__A , generator=__A , latents=__A , output_type=__A , return_dict=__A , callback=__A , callback_steps=__A , **__A , )
# Get first result from Stable Diffusion Checkpoint v1.4
lowerCAmelCase_ :Union[str, Any] = self.textaimg_sda_a(
prompt=__A , height=__A , width=__A , num_inference_steps=__A , guidance_scale=__A , negative_prompt=__A , num_images_per_prompt=__A , eta=__A , generator=__A , latents=__A , output_type=__A , return_dict=__A , callback=__A , callback_steps=__A , **__A , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
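# Hedged usage sketch: diffusers community pipelines like this one are usually
# loaded via `custom_pipeline`. The identifier below is an assumption about how
# this file would be registered, not something this snippet itself defines.
#   from diffusers import DiffusionPipeline
#   pipe = DiffusionPipeline.from_pretrained(
#       "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
#   )
#   images = pipe("a photo of an astronaut riding a horse").images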
| 256
| 1
|
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_ckpt", type=str, default="microsoft/unixcoder-base-nine")
    parser.add_argument("--num_epochs", type=int, default=5)
    parser.add_argument("--batch_size", type=int, default=6)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
    parser.add_argument("--freeze", type=bool, default=True)
    parser.add_argument("--learning_rate", type=float, default=5e-4)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--lr_scheduler_type", type=str, default="cosine")
    parser.add_argument("--num_warmup_steps", type=int, default=10)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--output_dir", type=str, default="./results")
    return parser.parse_args()
metric = load("accuracy")


def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return metric.compute(predictions=predictions, references=labels)
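# Quick illustrative check of compute_metrics (dummy logits, not from a real model):
#   compute_metrics((np.array([[0.1, 0.9], [0.8, 0.2]]), np.array([1, 0])))
#   # -> {"accuracy": 1.0}: the row-wise argmax (1, 0) matches the references.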
class CustomCallback(TrainerCallback):
    def __init__(self, trainer) -> None:
        super().__init__()
        self._trainer = trainer

    def on_epoch_end(self, args, state, control, **kwargs):
        if control.should_evaluate:
            control_copy = deepcopy(control)
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train")
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed)

    dataset = load_dataset("codeparrot/codecomplex", split="train")
    train_test = dataset.train_test_split(test_size=0.2)
    test_validation = train_test["test"].train_test_split(test_size=0.5)
    train_test_validation = DatasetDict(
        {
            "train": train_test["train"],
            "test": test_validation["train"],
            "valid": test_validation["test"],
        }
    )

    print("Loading tokenizer and model")
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt, num_labels=7)
    model.config.pad_token_id = model.config.eos_token_id

    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False

    labels = ClassLabel(num_classes=7, names=list(set(train_test_validation["train"]["complexity"])))

    def tokenize(example):
        inputs = tokenizer(example["src"], truncation=True, max_length=1024)
        label = labels.str2int(example["complexity"])
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }

    tokenized_datasets = train_test_validation.map(
        tokenize,
        batched=True,
        remove_columns=train_test_validation["train"].column_names,
    )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)

    training_args = TrainingArguments(
        output_dir=args.output_dir,
        learning_rate=args.learning_rate,
        lr_scheduler_type=args.lr_scheduler_type,
        evaluation_strategy="epoch",
        save_strategy="epoch",
        logging_strategy="epoch",
        per_device_train_batch_size=args.batch_size,
        per_device_eval_batch_size=args.batch_size,
        num_train_epochs=args.num_epochs,
        gradient_accumulation_steps=args.gradient_accumulation_steps,
        weight_decay=0.01,
        metric_for_best_model="accuracy",
        run_name="complexity-java",
        report_to="wandb",
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=tokenized_datasets["train"],
        eval_dataset=tokenized_datasets["valid"],
        tokenizer=tokenizer,
        data_collator=data_collator,
        compute_metrics=compute_metrics,
    )

    print("Training...")
    trainer.add_callback(CustomCallback(trainer))
    trainer.train()
if __name__ == "__main__":
main()
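# Example invocation (hypothetical script name and paths; the flags mirror get_args above):
#   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine \
#       --num_epochs 5 --batch_size 6 --output_dir ./results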
| 201
|
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class MCTCTProcessor(ProcessorMixin):
    feature_extractor_class = "MCTCTFeatureExtractor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def pad(self, *args, **kwargs):
        # For backwards compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)

        input_features = kwargs.pop("input_features", None)
        labels = kwargs.pop("labels", None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]

        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)

        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["labels"] = labels["input_ids"]
            return input_features

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
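# Hedged usage sketch (the checkpoint name below is an assumption; any M-CTC-T
# checkpoint with a saved feature extractor and tokenizer would work):
#   from transformers import MCTCTProcessor
#   processor = MCTCTProcessor.from_pretrained("speechbrain/m-ctc-t-large")
#   batch = processor(audio=waveform, sampling_rate=16000, text="transcript")
#   # -> audio features plus a `labels` field holding the tokenized transcript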
| 201
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_attention_heads=12, num_qa_labels=9500,
        num_object_labels=1600, num_attr_labels=400, intermediate_size=3072, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, l_layers=9, x_layers=5,
        r_layers=5, visual_feat_dim=2048, visual_pos_dim=4, visual_loss_normalizer=6.67,
        task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True,
        visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
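# Illustrative check against the defaults above: LXMERT reports its layer counts
# per modality rather than as a single integer.
#   config = LxmertConfig()
#   config.num_hidden_layers  # -> {"vision": 5, "cross_encoder": 5, "language": 9}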
| 721
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nllb_moe"] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 450
| 0
|
'''simple docstring'''
from __future__ import annotations
import requests
valid_terms = set(
    'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)


def get_subreddit_data(subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None) -> dict:
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
| 688
|
'''simple docstring'''
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
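# Illustrative usage (ANSI escape codes; 32 is green on VT100-compatible terminals):
#   writeColor("hello", 32, end="\n")   # prints "hello" in green
#   move_cursor(1, "UP"); clear_line()  # erase the line just printed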
| 688
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        use_entity_aware_attention=True, classifier_dropout=None,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
| 712
|
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
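# Illustrative usage (the decorated function below is made up):
#   @experimental
#   def new_feature():
#       return 42
#   new_feature()  # emits UserWarning: 'new_feature' is experimental and might be
#                  # subject to breaking changes in the future.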
| 606
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 92
|
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self, vocab_size=None, mask_token_id=None, pad_token_id=None, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1026,
        initializer_range=0.02, layer_norm_eps=1e-12, position_embedding_type="absolute",
        use_cache=True, emb_layer_norm_before=None, token_dropout=False, is_folding_model=False,
        esmfold_config=None, vocab_list=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )

        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width

        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")

        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
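# Sanity check: the tuple above is the 33-symbol ESM-2 alphabet.
#   len(get_default_vocab_list())  # -> 33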
| 92
| 1
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_a : Tuple = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_a : List[Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
_a : Tuple = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_a : Optional[int] = re.compile(R"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
_a : List[str] = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
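# Illustrative behaviour of the regex lookup on a made-up docstring fragment:
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   # -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]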
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 111
|
def selection_sort(collection):
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
| 111
| 1
|
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
| 64
|
def catalan_numbers(upper_limit: int) -> "list[int]":
    if upper_limit < 0:
        raise ValueError("Limit for the Catalan sequence must be ≥ 0")

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
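# Worked example: the first six Catalan numbers.
#   catalan_numbers(5)  # -> [1, 1, 2, 5, 14, 42]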
if __name__ == "__main__":
print('\n********* Catalan Numbers Using Dynamic Programming ************\n')
print('\n*** Enter -1 at any time to quit ***')
print('\nEnter the upper limit (≥ 0) for the Catalan number sequence: ', end='')
try:
while True:
        N = int(input().strip())
if N < 0:
print('\n********* Goodbye!! ************')
break
else:
print(f'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('Try another upper limit for the sequence: ', end='')
except (NameError, ValueError):
print('\n********* Invalid input, goodbye! ************\n')
import doctest
doctest.testmod()
| 64
| 1
|
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 12,
"Pm": 15,
"Em": 18,
"Zm": 21,
"Ym": 24,
}
def length_conversion(value: float, from_type: str, to_type: str) -> float:
    from_sanitized = from_type.lower().strip("s")
    to_sanitized = to_type.lower().strip("s")

    from_sanitized = UNIT_SYMBOL.get(from_sanitized, from_sanitized)
    to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized)

    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'from_type' value: {from_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            f"Invalid 'to_type' value: {to_type!r}.\n"
            f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}"
        )
        raise ValueError(msg)

    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1

    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)

    return value * pow(10, exponent)
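# Worked example: kilometers are 10**3 meters, so the exponent difference is 3.
#   length_conversion(4, "kilometer", "meter")  # -> 4000.0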
if __name__ == "__main__":
from doctest import testmod
testmod()
| 140
|
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 140
| 1
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    # Return the set of adjacent symbol pairs in a word (a tuple of symbols).
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
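# Illustrative call: adjacent-pair extraction over a symbol tuple.
#   get_pairs(("h", "e", "l", "l", "o"))
#   # -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}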
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)

        return " ".join(words)

    def _tokenize(self, text: str) -> list:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: list) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
| 397
|
"""simple docstring"""
def z_function(input_str: str) -> "list[int]":
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: "list[int]", s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if a value is greater than or equal to the length of the pattern
        # string, that index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer
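# Worked example: "abr" occurs twice in "abracadabra"; each z-value of the
# concatenated string that reaches len("abr") marks one occurrence.
#   find_pattern("abr", "abracadabra")  # -> 2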
if __name__ == "__main__":
import doctest
doctest.testmod()
| 642
| 0
|
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom], :] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    out_pdb_lines = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: List[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(features, result, b_factors=None, chain_index=None, remark=None, parents=None, parents_chain_index=None) -> Protein:
    return Protein(aatype=features["aatype"], atom_positions=result["final_atom_positions"], atom_mask=result["final_atom_mask"], residue_index=features["residue_index"] + 1, b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]), chain_index=chain_index, remark=remark, parents=parents, parents_chain_index=parents_chain_index)
| 709
|
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def set_seed(seed: int):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # ^^ safe to call this function even if cuda is not available
class EMAModel:
    """
    Exponential Moving Average of models weights
    """

    def __init__(
        self,
        parameters: Iterable[torch.nn.Parameter],
        decay: float = 0.9999,
        min_decay: float = 0.0,
        update_after_step: int = 0,
        use_ema_warmup: bool = False,
        inv_gamma: Union[float, int] = 1.0,
        power: Union[float, int] = 2 / 3,
        model_cls: Optional[Any] = None,
        model_config: Dict[str, Any] = None,
        **kwargs,
    ):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

            # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
            use_ema_warmup = True

        if kwargs.get("max_value", None) is not None:
            deprecation_message = "The `max_value` argument is deprecated. Please use `decay` instead."
            deprecate("max_value", "1.0.0", deprecation_message, standard_warn=False)
            decay = kwargs["max_value"]

        if kwargs.get("min_value", None) is not None:
            deprecation_message = "The `min_value` argument is deprecated. Please use `min_decay` instead."
            deprecate("min_value", "1.0.0", deprecation_message, standard_warn=False)
            min_decay = kwargs["min_value"]

        parameters = list(parameters)
        self.shadow_params = [p.clone().detach() for p in parameters]

        if kwargs.get("device", None) is not None:
            deprecation_message = "The `device` argument is deprecated. Please use `to` instead."
            deprecate("device", "1.0.0", deprecation_message, standard_warn=False)
            self.to(device=kwargs["device"])

        self.temp_stored_params = None

        self.decay = decay
        self.min_decay = min_decay
        self.update_after_step = update_after_step
        self.use_ema_warmup = use_ema_warmup
        self.inv_gamma = inv_gamma
        self.power = power
        self.optimization_step = 0
        self.cur_decay_value = None  # set in `step()`

        self.model_cls = model_cls
        self.model_config = model_config

    @classmethod
    def from_pretrained(cls, path, model_cls) -> "EMAModel":
        _, ema_kwargs = model_cls.load_config(path, return_unused_kwargs=True)
        model = model_cls.from_pretrained(path)

        ema_model = cls(model.parameters(), model_cls=model_cls, model_config=model.config)

        ema_model.load_state_dict(ema_kwargs)
        return ema_model

    def save_pretrained(self, path):
        if self.model_cls is None:
            raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__.")

        if self.model_config is None:
            raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__.")

        model = self.model_cls.from_config(self.model_config)
        state_dict = self.state_dict()
        state_dict.pop("shadow_params", None)

        model.register_to_config(**state_dict)
        self.copy_to(model.parameters())
        model.save_pretrained(path)

    def get_decay(self, optimization_step: int) -> float:
        step = max(0, optimization_step - self.update_after_step - 1)

        if step <= 0:
            return 0.0

        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)

        cur_decay_value = min(cur_decay_value, self.decay)
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value, self.min_decay)
        return cur_decay_value

    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]):
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`",
                "1.0.0",
                deprecation_message,
                standard_warn=False,
            )
            parameters = parameters.parameters()

        parameters = list(parameters)

        self.optimization_step += 1

        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay

        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed

        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)

            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)

    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)

    def to(self, device=None, dtype=None) -> None:
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]

    def state_dict(self) -> dict:
        return {
            "decay": self.decay,
            "min_decay": self.min_decay,
            "optimization_step": self.optimization_step,
            "update_after_step": self.update_after_step,
            "use_ema_warmup": self.use_ema_warmup,
            "inv_gamma": self.inv_gamma,
            "power": self.power,
            "shadow_params": self.shadow_params,
        }

    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]

    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)

        # Better memory-wise.
        self.temp_stored_params = None

    def load_state_dict(self, state_dict: dict) -> None:
        state_dict = copy.deepcopy(state_dict)

        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")

        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")

        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")

        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")

        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")

        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")

        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")

        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
| 297
| 0
|
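# --- Editor's sketch (not part of the dataset row above) ----------------------
# The ExponentialMovingAverage class above mirrors diffusers' EMAModel helper.
# Below is a minimal, self-contained sketch of its warmup decay schedule plus
# the usual store/copy_to/restore evaluation pattern; the step counts are
# illustrative only, and `ExponentialMovingAverage` refers to the class above.
def ema_decay(
    step: int,
    decay: float = 0.9999,
    min_decay: float = 0.0,
    update_after_step: int = 0,
    use_ema_warmup: bool = True,
    inv_gamma: float = 1.0,
    power: float = 2 / 3,
) -> float:
    """Standalone re-statement of get_decay() above, for inspection."""
    step = max(0, step - update_after_step - 1)
    if step <= 0:
        return 0.0
    if use_ema_warmup:
        cur = 1 - (1 + step / inv_gamma) ** -power
    else:
        cur = (1 + step) / (10 + step)
    return max(min(cur, decay), min_decay)

for s in (1, 10, 100, 10_000):
    print(s, round(ema_decay(s), 6))  # the decay ramps up toward 0.9999

# Typical training/evaluation pattern with the class above:
#   ema = ExponentialMovingAverage(model.parameters())
#   ...call ema.step(model.parameters()) after each optimizer step...
#   ema.store(model.parameters())    # stash the raw weights
#   ema.copy_to(model.parameters())  # evaluate with EMA weights
#   ema.restore(model.parameters())  # put the raw weights back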
"""simple docstring"""
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class BridgeTowerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Compute the expected height and width when providing images to
        BridgeTowerImageProcessor, assuming do_resize is set to True with a
        scalar size and size_divisor.
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
| 434
|
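# --- Editor's sketch (not part of the dataset row above) ----------------------
# get_expected_values() above encodes the shortest-edge resizing rule: scale so
# the short side hits `size`, cap the long side at int(1333/800 * size), then
# snap both sides down to a multiple of `size_divisor`. A standalone version of
# that arithmetic, assuming the tester defaults (size=288, size_divisor=32):
def expected_resized_shape(h: int, w: int, size: int = 288, size_divisor: int = 32) -> tuple:
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    max_size = int((1333 / 800) * size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    newh, neww = int(newh + 0.5), int(neww + 0.5)
    return newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

print(expected_resized_shape(480, 640))  # (288, 384): short side -> 288, both multiples of 32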
from __future__ import annotations

END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    """Return all words in the pre-built trie that start with `string`."""
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 61
| 0
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 228
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
| 228
| 1
|
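# --- Editor's sketch (not part of the dataset row above) ----------------------
# As the comment in the ViT tester above notes, the encoder sequence length is
# the number of image patches plus one [CLS] token. A quick check of that
# arithmetic with the tester defaults (30x30 images, 2x2 patches):
image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2  # 15 * 15 = 225
seq_length = num_patches + 1                   # +1 for the [CLS] token
print(num_patches, seq_length)                 # 225 226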
"""simple docstring"""
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k : empirically determined constant in [0.04, 0.06]
        window_size : neighbourhood considered around each pixel
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self) -> str:
        return str(self.k)

    def detect(self, img_path: str) -> tuple[cv2.Mat, list[list[int]]]:
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list: list[list[int]] = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, _ = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 247
|
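# --- Editor's sketch (not part of the dataset row above) ----------------------
# The double loop in HarrisCorner.detect() above sums the structure tensor over
# each window explicitly. The same corner response can be computed in
# vectorized form; this is a design alternative, not what the class above does,
# and it assumes SciPy is available for the box filter.
import numpy as np
from scipy.ndimage import uniform_filter

def harris_response(img, k: float = 0.04, window_size: int = 3):
    dy, dx = np.gradient(img.astype(float))
    # uniform_filter averages over the window; multiplying by the window area
    # recovers the windowed sums used in the loop version above
    wxx = uniform_filter(dx * dx, size=window_size) * window_size**2
    wyy = uniform_filter(dy * dy, size=window_size) * window_size**2
    wxy = uniform_filter(dx * dy, size=window_size) * window_size**2
    det = wxx * wyy - wxy**2
    trace = wxx + wyy
    return det - k * trace**2  # large positive response = corner

img = np.zeros((32, 32))
img[8:24, 8:24] = 1.0  # a bright square: the strongest responses sit at its corners
r = harris_response(img)
print(np.unravel_index(np.argmax(r), r.shape))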
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
__UpperCamelCase = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)
def __A ( self ) -> Any:
self.config_tester.run_common_tests()
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __A ( self ) -> str:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def __A ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__ )
@jax.jit
def encode_jitted(lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__ ):
return model.encode(input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
with self.subTest('JIT Enabled' ):
SCREAMING_SNAKE_CASE = encode_jitted(**lowerCAmelCase__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE = encode_jitted(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
for jitted_output, output in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def __A ( self ) -> Tuple:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE = model_class(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
SCREAMING_SNAKE_CASE = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
return model.decode(
decoder_input_ids=lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , encoder_outputs=lowerCAmelCase__ , )
with self.subTest('JIT Enabled' ):
SCREAMING_SNAKE_CASE = decode_jitted(**lowerCAmelCase__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE = decode_jitted(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
for jitted_output, output in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def __A ( self ) -> Union[str, Any]:
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE = model_class_name.from_pretrained('google/pegasus-large' , from_pt=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = np.ones((1, 1) )
SCREAMING_SNAKE_CASE = model(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@slow
def __A ( self ) -> Any:
SCREAMING_SNAKE_CASE = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' )
SCREAMING_SNAKE_CASE = PegasusTokenizer.from_pretrained('google/pegasus-xsum' )
SCREAMING_SNAKE_CASE = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
SCREAMING_SNAKE_CASE = [
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase__ , return_tensors='np' , truncation=lowerCAmelCase__ , max_length=512 , padding=lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = model.generate(**lowerCAmelCase__ , num_beams=2 ).sequences
SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
assert tgt_text == decoded
| 247
| 1
|
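# --- Editor's sketch (not part of the dataset row above) ----------------------
# prepare_pegasus_inputs_dict() above derives attention masks from the pad
# token. A minimal standalone demonstration of that masking rule (toy ids,
# pad_token_id assumed to be 0):
import numpy as np

pad_token_id = 0
input_ids = np.array([[5, 7, 9, 0, 0], [4, 4, 4, 4, 0]])
attention_mask = np.not_equal(input_ids, pad_token_id).astype(np.int8)
print(attention_mask)  # [[1 1 1 0 0], [1 1 1 1 0]]

# For the decoder, the first (start) token is always attended to, and the rest
# follow the same pad rule:
decoder_input_ids = np.array([[1, 8, 0, 0]])
decoder_attention_mask = np.concatenate(
    [
        np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
        np.not_equal(decoder_input_ids[:, 1:], pad_token_id).astype(np.int8),
    ],
    axis=-1,
)
print(decoder_attention_mask)  # [[1 1 0 0]]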
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
def __init__( self : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any]=13 , __UpperCamelCase : str=30 , __UpperCamelCase : int=2 , __UpperCamelCase : Any=3 , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Optional[int]=32 , __UpperCamelCase : Any=5 , __UpperCamelCase : Optional[Any]=4 , __UpperCamelCase : Optional[int]=37 , __UpperCamelCase : Optional[int]="gelu" , __UpperCamelCase : Optional[Any]=0.1 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Any=10 , __UpperCamelCase : Union[str, Any]=0.02 , __UpperCamelCase : str=3 , __UpperCamelCase : Dict=0.6 , __UpperCamelCase : Optional[Any]=None , ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : str = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : Optional[Any] = image_size
snake_case__ : List[str] = patch_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : Optional[Any] = is_training
snake_case__ : Optional[int] = use_labels
snake_case__ : Union[str, Any] = hidden_size
snake_case__ : Dict = num_hidden_layers
snake_case__ : Optional[int] = num_attention_heads
snake_case__ : Tuple = intermediate_size
snake_case__ : Optional[Any] = hidden_act
snake_case__ : List[Any] = hidden_dropout_prob
snake_case__ : str = attention_probs_dropout_prob
snake_case__ : List[str] = type_sequence_label_size
snake_case__ : int = initializer_range
snake_case__ : Any = mask_ratio
snake_case__ : Optional[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
snake_case__ : Optional[Any] = (image_size // patch_size) ** 2
snake_case__ : Tuple = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
snake_case__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : Union[str, Any] = None
if self.use_labels:
snake_case__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def lowerCAmelCase ( self : Any , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : int ) -> Union[str, Any]:
"""simple docstring"""
snake_case__ : Any = ViTMAEModel(config=__a )
model.to(__a )
model.eval()
snake_case__ : Optional[int] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
snake_case__ : str = ViTMAEForPreTraining(__a )
model.to(__a )
model.eval()
snake_case__ : Any = model(__a )
snake_case__ : Dict = (self.image_size // self.patch_size) ** 2
snake_case__ : str = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
snake_case__ : int = 1
snake_case__ : str = ViTMAEForPreTraining(__a )
model.to(__a )
model.eval()
snake_case__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ : List[str] = model(__a )
snake_case__ : Optional[Any] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
def lowerCAmelCase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[str] = model_class(__a )
snake_case__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : List[Any] = [*signature.parameters.keys()]
snake_case__ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __a )
def lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__a )
def lowerCAmelCase ( self : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[str] ) -> Any:
"""simple docstring"""
np.random.seed(2 )
snake_case__ : str = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
snake_case__ : Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
snake_case__ : Optional[int] = torch.from_numpy(__a )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
snake_case__ : Any = pt_noise
super().check_pt_tf_models(__a , __a , __a )
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(__a )
model.to(__a )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
snake_case__ : List[str] = model(**self._prepare_for_class(__a , __a ) )
snake_case__ : Dict = outputs[0].cpu().numpy()
snake_case__ : Dict = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__a )
snake_case__ : Dict = model_class.from_pretrained(__a )
model.to(__a )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
snake_case__ : Optional[int] = model(**self._prepare_for_class(__a , __a ) )
# Make sure we don't have nans
snake_case__ : List[str] = after_outputs[0].cpu().numpy()
snake_case__ : Optional[Any] = 0
snake_case__ : List[str] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(__a , 1e-5 )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCAmelCase ( self : Optional[Any] ) -> int:
"""simple docstring"""
pass
@slow
def lowerCAmelCase ( self : str ) -> Any:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : List[str] = ViTMAEModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def __UpperCAmelCase ( ) -> List[str]:
snake_case__ : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
@cached_property
def lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
np.random.seed(2 )
snake_case__ : int = ViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' ).to(__a )
snake_case__ : Tuple = self.default_image_processor
snake_case__ : Dict = prepare_img()
snake_case__ : List[Any] = image_processor(images=__a , return_tensors='''pt''' ).to(__a )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
snake_case__ : Tuple = ViTMAEConfig()
snake_case__ : List[Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
snake_case__ : Union[str, Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
snake_case__ : Union[str, Any] = model(**__a , noise=torch.from_numpy(__a ).to(device=__a ) )
# verify the logits
snake_case__ : int = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , __a )
snake_case__ : str = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(__a ) , atol=1e-4 ) )
| 720
|
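# --- Editor's sketch (not part of the dataset row above) ----------------------
# The ViTMAE tester above computes the encoder sequence length from the mask
# ratio: only the unmasked patches (plus [CLS]) are kept, rounded up. With its
# defaults (30x30 images, 2x2 patches, mask_ratio=0.6):
import math

image_size, patch_size, mask_ratio = 30, 2, 0.6
num_patches = (image_size // patch_size) ** 2                      # 225
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))  # ceil(0.4 * 226) = 91
print(num_patches, seq_length)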
'''simple docstring'''
from __future__ import annotations


def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune: the current path already overshoots max_sum, or even taking every
    # remaining number cannot reach it.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 574
| 0
|
from __future__ import annotations


class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the rightmost mismatching text position in the current window, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
| 548
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase_ : Any = logging.get_logger(__name__)
lowerCamelCase_ : Dict = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
| 548
| 1
|
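# --- Editor's sketch (not part of the dataset row above) ----------------------
# Short usage example for the config class above. DPRConfig ships with
# `transformers`, so this should work as written; the overridden values here
# are arbitrary.
from transformers import DPRConfig

config = DPRConfig(projection_dim=128, hidden_dropout_prob=0.2)
print(config.model_type)      # "dpr"
print(config.projection_dim)  # 128
print(config.vocab_size)      # 30522 (default)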
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase=None ):
'''simple docstring'''
assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
UpperCAmelCase_ : List[Any] = nn.Parameter(_lowercase )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
UpperCAmelCase_ : Any = nn.Parameter(_lowercase )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = np.asarray(weights[0] )
UpperCAmelCase_ : Tuple = np.asarray(weights[1] )
UpperCAmelCase_ : Union[str, Any] = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(_lowercase ).transpose(1 , 2 ).contiguous().view(-1 , _lowercase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(_lowercase ).transpose(1 , 2 ).contiguous().view(-1 , _lowercase ) , )
set_param(
torch_layer.output.dense , torch.tensor(_lowercase ).view(-1 , _lowercase ).contiguous().transpose(0 , 1 ) , )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = np.asarray(weights[0] )
UpperCAmelCase_ : Optional[int] = np.asarray(weights[1] )
UpperCAmelCase_ : Dict = np.asarray(weights[2] )
UpperCAmelCase_ : Union[str, Any] = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(_lowercase ).transpose(1 , 2 ).contiguous().view(-1 , _lowercase ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(_lowercase ).transpose(1 , 2 ).contiguous().view(-1 , _lowercase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(_lowercase ).transpose(1 , 2 ).contiguous().view(-1 , _lowercase ) , )
set_param(
torch_layer.output.dense , torch.tensor(_lowercase ).view(-1 , _lowercase ).contiguous().transpose(0 , 1 ) , )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = weights[0][0][0]
UpperCAmelCase_ : Optional[int] = np.asarray(layer_norm_a[0] )
UpperCAmelCase_ : List[str] = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(_lowercase ) , torch.tensor(_lowercase ) , )
# lsh weights + output
UpperCAmelCase_ : Any = weights[0][1]
if len(_lowercase ) < 4:
set_layer_weights_in_torch_lsh(_lowercase , torch_block.attention , _lowercase )
else:
set_layer_weights_in_torch_local(_lowercase , torch_block.attention , _lowercase )
# intermediate weighs
UpperCAmelCase_ : str = weights[2][0][1][2]
# Chunked Feed Forward
if len(_lowercase ) == 4:
UpperCAmelCase_ : Optional[int] = intermediate_weights[2]
# layernorm 2
UpperCAmelCase_ : Union[str, Any] = np.asarray(intermediate_weights[0][0] )
UpperCAmelCase_ : int = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(_lowercase ) , torch.tensor(_lowercase ) , )
# intermediate dense
UpperCAmelCase_ : Union[str, Any] = np.asarray(intermediate_weights[1][0] )
UpperCAmelCase_ : int = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(_lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(_lowercase ) , )
# intermediate out
UpperCAmelCase_ : List[Any] = np.asarray(intermediate_weights[4][0] )
UpperCAmelCase_ : Optional[Any] = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(_lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(_lowercase ) , )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = torch_model.reformer
# word embeds
UpperCAmelCase_ : List[str] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(_lowercase ) , )
if isinstance(weights[3] , _lowercase ):
UpperCAmelCase_ : Optional[Any] = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
UpperCAmelCase_ : int = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'''{position_embeddings[emb_idx]} emb does not match'''
UpperCAmelCase_ : str = nn.Parameter(torch.tensor(_lowercase ) )
UpperCAmelCase_ : Optional[Any] = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
_lowercase ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
UpperCAmelCase_ : str = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(_lowercase , _lowercase , _lowercase )
# output layer norm
UpperCAmelCase_ : Tuple = np.asarray(weights[7][0] )
UpperCAmelCase_ : Union[str, Any] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(_lowercase ) , torch.tensor(_lowercase ) , )
# output embeddings
UpperCAmelCase_ : List[Any] = np.asarray(weights[9][0] )
UpperCAmelCase_ : Optional[Any] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(_lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(_lowercase ) , )
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = ReformerConfig.from_json_file(_lowercase )
print(f'''Building PyTorch model from configuration: {config}''' )
UpperCAmelCase_ : List[str] = ReformerModelWithLMHead(_lowercase )
with open(_lowercase , '''rb''' ) as f:
UpperCAmelCase_ : Dict = pickle.load(_lowercase )['''weights''']
set_model_weights_in_torch(_lowercase , _lowercase , config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , _lowercase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the trax model pickle (.pkl) checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained Reformer model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__a = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
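# A minimal invocation sketch (added for clarity, not part of the original script); the
# script and file names below are hypothetical placeholders:
#
#   python convert_reformer_trax_checkpoint_to_pytorch.py \
#       --trax_model_pkl_path ./reformer_weights.pkl \
#       --config_file ./reformer_config.json \
#       --pytorch_dump_path ./pytorch_model.bin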
| 705
|
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__a = '\\n@inproceedings{snover-etal-2006-study,\n title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n author = "Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John",\n booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n month = aug # " 8-12",\n year = "2006",\n address = "Cambridge, Massachusetts, USA",\n publisher = "Association for Machine Translation in the Americas",\n url = "https://aclanthology.org/2006.amta-papers.25",\n pages = "223--231",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
__a = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
__a = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                "what about this sentence?",\n        ...                "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...               ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                       references=references,\n        ...                       case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                       references=references,\n        ...                       case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                       references=references,\n        ...                       normalized=True,\n        ...                       case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                       references=references,\n        ...                       ignore_punct=True,\n        ...                       case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                "what about this sentence?",\n        ...                "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...               ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...               ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                       references=references,\n        ...                       ignore_punct=True,\n        ...                       case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __a( datasets.Metric ):
"""simple docstring"""
def a__ ( self ) -> int:
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage='''http://www.cs.umd.edu/~snover/tercom/''' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' ,id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' ,id='''sequence''' ) ,id='''references''' ),
} ) ,codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] ,reference_urls=[
'''https://github.com/jhclark/tercom''',
] ,)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = False ,) -> int:
UpperCAmelCase_ : Optional[Any] = len(references[0] )
if any(len(_SCREAMING_SNAKE_CASE ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
UpperCAmelCase_ : Dict = [[refs[i] for refs in references] for i in range(_SCREAMING_SNAKE_CASE )]
UpperCAmelCase_ : Union[str, Any] = TER(
normalized=_SCREAMING_SNAKE_CASE ,no_punct=_SCREAMING_SNAKE_CASE ,asian_support=_SCREAMING_SNAKE_CASE ,case_sensitive=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : Tuple = sb_ter.corpus_score(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 300
| 0
|
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : List[Any] = logging.get_logger(__name__)
UpperCamelCase : str = {
'facebook/encodec_24khz': 'https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json',
'facebook/encodec_48khz': 'https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json',
}
class UpperCamelCase__ (a ):
'''simple docstring'''
_UpperCamelCase = 'encodec'
def __init__( self ,_lowerCAmelCase=[1.5, 3.0, 6.0, 12.0, 24.0] ,_lowerCAmelCase=2_40_00 ,_lowerCAmelCase=1 ,_lowerCAmelCase=False ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=1_28 ,_lowerCAmelCase=32 ,_lowerCAmelCase=1 ,_lowerCAmelCase=[8, 5, 4, 2] ,_lowerCAmelCase="weight_norm" ,_lowerCAmelCase=7 ,_lowerCAmelCase=7 ,_lowerCAmelCase=3 ,_lowerCAmelCase=2 ,_lowerCAmelCase=True ,_lowerCAmelCase="reflect" ,_lowerCAmelCase=2 ,_lowerCAmelCase=2 ,_lowerCAmelCase=1.0 ,_lowerCAmelCase=10_24 ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,**_lowerCAmelCase ,):
lowerCamelCase__ = target_bandwidths
lowerCamelCase__ = sampling_rate
lowerCamelCase__ = audio_channels
lowerCamelCase__ = normalize
lowerCamelCase__ = chunk_length_s
lowerCamelCase__ = overlap
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_filters
lowerCamelCase__ = num_residual_layers
lowerCamelCase__ = upsampling_ratios
lowerCamelCase__ = norm_type
lowerCamelCase__ = kernel_size
lowerCamelCase__ = last_kernel_size
lowerCamelCase__ = residual_kernel_size
lowerCamelCase__ = dilation_growth_rate
lowerCamelCase__ = use_causal_conv
lowerCamelCase__ = pad_mode
lowerCamelCase__ = compress
lowerCamelCase__ = num_lstm_layers
lowerCamelCase__ = trim_right_ratio
lowerCamelCase__ = codebook_size
lowerCamelCase__ = codebook_dim if codebook_dim is not None else hidden_size
lowerCamelCase__ = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
F'''self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`, got {self.norm_type}''' )
super().__init__(**_lowerCAmelCase )
@property
def UpperCamelCase_ ( self ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def UpperCamelCase_ ( self ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
@property
def UpperCamelCase_ ( self ):
lowerCamelCase__ = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def UpperCamelCase_ ( self ):
return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
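# A worked sketch (added for clarity, not part of the original file) of the derived
# properties above, using the constructor defaults: with sampling_rate=24000 and
# upsampling_ratios=[8, 5, 4, 2] the hop length is 8 * 5 * 4 * 2 = 320, so the frame
# rate is ceil(24000 / 320) = 75 frames per second, and the last property yields
# int(1000 * 24.0 // (75 * 10)) = 32 quantizers for the 24 kbps bandwidth target;
# chunk_length and the chunk stride stay None unless chunk_length_s and overlap are set.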
| 50
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : Tuple = logging.get_logger(__name__)
def A__ ( __lowerCAmelCase : int ):
lowerCamelCase__ = DPTConfig(embedding_type="""hybrid""" )
if "large" in checkpoint_url:
lowerCamelCase__ = 1024
lowerCamelCase__ = 4096
lowerCamelCase__ = 24
lowerCamelCase__ = 16
lowerCamelCase__ = [5, 11, 17, 23]
lowerCamelCase__ = [256, 512, 1024, 1024]
lowerCamelCase__ = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
lowerCamelCase__ = 768
lowerCamelCase__ = [1, 1, 1, 0.5]
lowerCamelCase__ = [256, 512, 768, 768]
lowerCamelCase__ = 150
lowerCamelCase__ = 16
lowerCamelCase__ = (1, 384, 384)
lowerCamelCase__ = False
lowerCamelCase__ = """project"""
if "ade" in checkpoint_url:
lowerCamelCase__ = True
lowerCamelCase__ = 768
lowerCamelCase__ = [1, 1, 1, 0.5]
lowerCamelCase__ = 150
lowerCamelCase__ = 16
lowerCamelCase__ = """huggingface/label-files"""
lowerCamelCase__ = """ade20k-id2label.json"""
lowerCamelCase__ = json.load(open(cached_download(hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) ) , """r""" ) )
lowerCamelCase__ = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
lowerCamelCase__ = idalabel
lowerCamelCase__ = {v: k for k, v in idalabel.items()}
lowerCamelCase__ = [1, 150, 480, 480]
return config, expected_shape
def A__ ( __lowerCAmelCase : Optional[int] ):
lowerCamelCase__ = ["""pretrained.model.head.weight""", """pretrained.model.head.bias"""]
for k in ignore_keys:
state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def A__ ( __lowerCAmelCase : List[Any] ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.encoder""" )
if "pretrained.model" in name:
lowerCamelCase__ = name.replace("""pretrained.model""" , """dpt.embeddings""" )
if "patch_embed" in name:
lowerCamelCase__ = name.replace("""patch_embed""" , """""" )
if "pos_embed" in name:
lowerCamelCase__ = name.replace("""pos_embed""" , """position_embeddings""" )
if "attn.proj" in name:
lowerCamelCase__ = name.replace("""attn.proj""" , """attention.output.dense""" )
if "proj" in name and "project" not in name:
lowerCamelCase__ = name.replace("""proj""" , """projection""" )
if "blocks" in name:
lowerCamelCase__ = name.replace("""blocks""" , """layer""" )
if "mlp.fc1" in name:
lowerCamelCase__ = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCamelCase__ = name.replace("""mlp.fc2""" , """output.dense""" )
if "norm1" in name and "backbone" not in name:
lowerCamelCase__ = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name and "backbone" not in name:
lowerCamelCase__ = name.replace("""norm2""" , """layernorm_after""" )
if "scratch.output_conv" in name:
lowerCamelCase__ = name.replace("""scratch.output_conv""" , """head""" )
if "scratch" in name:
lowerCamelCase__ = name.replace("""scratch""" , """neck""" )
if "layer1_rn" in name:
lowerCamelCase__ = name.replace("""layer1_rn""" , """convs.0""" )
if "layer2_rn" in name:
lowerCamelCase__ = name.replace("""layer2_rn""" , """convs.1""" )
if "layer3_rn" in name:
lowerCamelCase__ = name.replace("""layer3_rn""" , """convs.2""" )
if "layer4_rn" in name:
lowerCamelCase__ = name.replace("""layer4_rn""" , """convs.3""" )
if "refinenet" in name:
lowerCamelCase__ = int(name[len("""neck.refinenet""" ) : len("""neck.refinenet""" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
lowerCamelCase__ = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
lowerCamelCase__ = name.replace("""out_conv""" , """projection""" )
if "resConfUnit1" in name:
lowerCamelCase__ = name.replace("""resConfUnit1""" , """residual_layer1""" )
if "resConfUnit2" in name:
lowerCamelCase__ = name.replace("""resConfUnit2""" , """residual_layer2""" )
if "conv1" in name:
lowerCamelCase__ = name.replace("""conv1""" , """convolution1""" )
if "conv2" in name:
lowerCamelCase__ = name.replace("""conv2""" , """convolution2""" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.0.project.0""" , """neck.reassemble_stage.readout_projects.0.0""" )
if "pretrained.act_postprocess2.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.0.project.0""" , """neck.reassemble_stage.readout_projects.1.0""" )
if "pretrained.act_postprocess3.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.0.project.0""" , """neck.reassemble_stage.readout_projects.2.0""" )
if "pretrained.act_postprocess4.0.project.0" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.0.project.0""" , """neck.reassemble_stage.readout_projects.3.0""" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.3""" , """neck.reassemble_stage.layers.0.projection""" )
if "pretrained.act_postprocess1.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess1.4""" , """neck.reassemble_stage.layers.0.resize""" )
if "pretrained.act_postprocess2.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.3""" , """neck.reassemble_stage.layers.1.projection""" )
if "pretrained.act_postprocess2.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess2.4""" , """neck.reassemble_stage.layers.1.resize""" )
if "pretrained.act_postprocess3.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess3.3""" , """neck.reassemble_stage.layers.2.projection""" )
if "pretrained.act_postprocess4.3" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.3""" , """neck.reassemble_stage.layers.3.projection""" )
if "pretrained.act_postprocess4.4" in name:
lowerCamelCase__ = name.replace("""pretrained.act_postprocess4.4""" , """neck.reassemble_stage.layers.3.resize""" )
if "pretrained" in name:
lowerCamelCase__ = name.replace("""pretrained""" , """dpt""" )
if "bn" in name:
lowerCamelCase__ = name.replace("""bn""" , """batch_norm""" )
if "head" in name:
lowerCamelCase__ = name.replace("""head""" , """head.head""" )
if "encoder.norm" in name:
lowerCamelCase__ = name.replace("""encoder.norm""" , """layernorm""" )
if "auxlayer" in name:
lowerCamelCase__ = name.replace("""auxlayer""" , """auxiliary_head.head""" )
if "backbone" in name:
lowerCamelCase__ = name.replace("""backbone""" , """backbone.bit.encoder""" )
if ".." in name:
lowerCamelCase__ = name.replace("""..""" , """.""" )
if "stem.conv" in name:
lowerCamelCase__ = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
lowerCamelCase__ = name.replace("""blocks""" , """layers""" )
if "convolution" in name and "backbone" in name:
lowerCamelCase__ = name.replace("""convolution""" , """conv""" )
if "layer" in name and "backbone" in name:
lowerCamelCase__ = name.replace("""layer""" , """layers""" )
if "backbone.bit.encoder.bit" in name:
lowerCamelCase__ = name.replace("""backbone.bit.encoder.bit""" , """backbone.bit""" )
if "embedder.conv" in name:
lowerCamelCase__ = name.replace("""embedder.conv""" , """embedder.convolution""" )
if "backbone.bit.encoder.stem.norm" in name:
lowerCamelCase__ = name.replace("""backbone.bit.encoder.stem.norm""" , """backbone.bit.embedder.norm""" )
return name
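# A hedged trace (added for clarity, illustrative key, not from the original script) of
# the renaming above: a timm-style key such as
#   "pretrained.model.blocks.0.attn.proj.weight"
# passes through the "pretrained.model" -> "dpt.encoder", "attn.proj" ->
# "attention.output.dense" and "blocks" -> "layer" rules to become
#   "dpt.encoder.layer.0.attention.output.dense.weight"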
def A__ ( __lowerCAmelCase : str , __lowerCAmelCase : int ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
lowerCamelCase__ = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowerCamelCase__ = in_proj_weight[: config.hidden_size, :]
lowerCamelCase__ = in_proj_bias[: config.hidden_size]
lowerCamelCase__ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase__ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowerCamelCase__ = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase__ = in_proj_bias[-config.hidden_size :]
def A__ ( ):
lowerCamelCase__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowerCamelCase__ = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def A__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Dict , __lowerCAmelCase : Any ):
lowerCamelCase__ , lowerCamelCase__ = get_dpt_config(__lowerCAmelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
lowerCamelCase__ = torch.load(__lowerCAmelCase , map_location="""cpu""" )
# remove certain keys
remove_ignore_keys_(__lowerCAmelCase )
# rename keys
for key in state_dict.copy().keys():
lowerCamelCase__ = state_dict.pop(__lowerCAmelCase )
lowerCamelCase__ = val
# read in qkv matrices
read_in_q_k_v(__lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
lowerCamelCase__ = DPTForSemanticSegmentation(__lowerCAmelCase ) if """ade""" in checkpoint_url else DPTForDepthEstimation(__lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
# Check outputs on an image
lowerCamelCase__ = 480 if """ade""" in checkpoint_url else 384
lowerCamelCase__ = DPTImageProcessor(size=__lowerCAmelCase )
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = image_processor(__lowerCAmelCase , return_tensors="""pt""" )
# forward pass
lowerCamelCase__ = model(**__lowerCAmelCase ).logits if """ade""" in checkpoint_url else model(**__lowerCAmelCase ).predicted_depth
if show_prediction:
lowerCamelCase__ = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="""bicubic""" , align_corners=__lowerCAmelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCAmelCase )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
model.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
image_processor.push_to_hub("""ybelkada/dpt-hybrid-midas""" )
if __name__ == "__main__":
UpperCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
UpperCamelCase : List[str] = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
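# A minimal invocation sketch (added for clarity, not part of the original script); the
# script name and the local checkpoint path are hypothetical placeholders:
#
#   python convert_dpt_hybrid_to_pytorch.py \
#       --checkpoint_url ./dpt_hybrid-midas.pt \
#       --pytorch_dump_folder_path ./dpt-hybrid-midas \
#       --show_prediction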
| 50
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__UpperCAmelCase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 700
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__UpperCAmelCase = [
'small',
'small-base',
'medium',
'medium-base',
'intermediate',
'intermediate-base',
'large',
'large-base',
'xlarge',
'xlarge-base',
]
__UpperCAmelCase = {
'vocab_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json',
'funnel-transformer/small-base': (
'https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'
),
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json',
'funnel-transformer/medium-base': (
'https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json',
'funnel-transformer/large-base': (
'https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'
),
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json',
'funnel-transformer/xlarge-base': (
'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'
),
},
}
__UpperCAmelCase = {F"""funnel-transformer/{name}""": 5_12 for name in _model_names}
__UpperCAmelCase = {F"""funnel-transformer/{name}""": {'do_lower_case': True} for name in _model_names}
class _SCREAMING_SNAKE_CASE ( A__ ):
UpperCAmelCase_ :int = VOCAB_FILES_NAMES
UpperCAmelCase_ :Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ :Dict = PRETRAINED_INIT_CONFIGURATION
UpperCAmelCase_ :int = FunnelTokenizer
UpperCAmelCase_ :Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ :int = 2
def __init__( self , __A=None , __A=None , __A=True , __A="<unk>" , __A="<sep>" , __A="<pad>" , __A="<cls>" , __A="<mask>" , __A="<s>" , __A="</s>" , __A=True , __A=True , __A=None , __A="##" , **__A , ) -> Optional[Any]:
super().__init__(
__A , tokenizer_file=__A , do_lower_case=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , bos_token=__A , eos_token=__A , clean_text=__A , tokenize_chinese_chars=__A , strip_accents=__A , wordpieces_prefix=__A , **__A , )
lowerCAmelCase_ :List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , __A ) != do_lower_case
or normalizer_state.get("""strip_accents""" , __A ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , __A ) != tokenize_chinese_chars
):
lowerCAmelCase_ :Any = getattr(__A , normalizer_state.pop("""type""" ) )
lowerCAmelCase_ :Any = do_lower_case
lowerCAmelCase_ :List[Any] = strip_accents
lowerCAmelCase_ :Optional[Any] = tokenize_chinese_chars
lowerCAmelCase_ :Optional[int] = normalizer_class(**__A )
lowerCAmelCase_ :Tuple = do_lower_case
def __lowerCAmelCase ( self , __A , __A=None ) -> List[str]:
lowerCAmelCase_ :Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __lowerCAmelCase ( self , __A , __A = None ) -> List[int]:
lowerCAmelCase_ :Dict = [self.sep_token_id]
lowerCAmelCase_ :Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0]
return len(cls ) * [self.cls_token_type_id] + len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCAmelCase ( self , __A , __A = None ) -> Tuple[str]:
lowerCAmelCase_ :Union[str, Any] = self._tokenizer.model.save(__A , name=__A )
return tuple(__A )
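# A hedged usage sketch (added for clarity, not part of the original file); upstream
# this class corresponds to FunnelTokenizerFast, so typical use would look like:
#
#   >>> from transformers import FunnelTokenizerFast
#   >>> tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
#   >>> tokenizer("Hello world")["input_ids"]  # starts with the <cls> token id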
| 256
| 0
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
UpperCAmelCase__ = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase__ :
__a = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
__a = field(
default=A_ , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
__a = field(
default=A_ , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
__a = field(
default=A_ , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
__a = field(
default=A_ , metadata={"""help""": """Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."""} , )
__a = field(
default="""main""" , metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} , )
__a = field(
default=A_ , metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} , )
@dataclass
class lowerCAmelCase__ :
__a = field(default=A_ , metadata={"""help""": """The input training data file (a text file)."""} )
__a = field(
default=A_ , metadata={"""help""": """An optional input evaluation data file to evaluate the perplexity on (a text file)."""} , )
__a = field(
default=A_ , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
__a = field(
default=A_ , metadata={"""help""": """The number of processes to use for the preprocessing."""} , )
__a = field(
default=A_ , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. If passed, sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__a = field(
default=A_ , metadata={
"""help""": (
"""Whether to pad all samples to the maximum sentence length. """
"""If False, will pad the samples dynamically when batching to the maximum length in the batch. More """
"""efficient on GPU but very bad for TPU."""
)
} , )
__a = field(
default=A_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} , )
__a = field(
default=A_ , metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} , )
def lowercase ( self : List[str] ):
if self.train_file is not None:
_snake_case = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
_snake_case = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class lowerCAmelCase__ :
__a = 42
__a = True
__a = None
__a = None
def __call__( self : Dict , _lowerCamelCase : Dict ):
_snake_case = '''label''' if '''label''' in features[0].keys() else '''labels'''
_snake_case = [feature.pop(_lowerCamelCase ) for feature in features]
_snake_case = len(_lowerCamelCase )
_snake_case = len(features[0]['''input_ids'''] )
_snake_case = [
[{k: v[i] for k, v in feature.items()} for i in range(_lowerCamelCase )] for feature in features
]
_snake_case = list(chain(*_lowerCamelCase ) )
_snake_case = self.tokenizer.pad(
_lowerCamelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
# Un-flatten
_snake_case = {k: v.view(_lowerCamelCase , _lowerCamelCase , -1 ) for k, v in batch.items()}
# Add back labels
_snake_case = torch.tensor(_lowerCamelCase , dtype=torch.intaa )
return batch
def _UpperCAmelCase ( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_snake_case = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_snake_case , _snake_case , _snake_case = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_snake_case , _snake_case , _snake_case = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , __lowerCamelCase , __lowerCamelCase )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_snake_case = training_args.get_process_log_level()
logger.setLevel(__lowerCamelCase )
datasets.utils.logging.set_verbosity(__lowerCamelCase )
transformers.utils.logging.set_verbosity(__lowerCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f''', distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
_snake_case = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_snake_case = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_snake_case = {}
if data_args.train_file is not None:
_snake_case = data_args.train_file
if data_args.validation_file is not None:
_snake_case = data_args.validation_file
_snake_case = data_args.train_file.split('''.''' )[-1]
_snake_case = load_dataset(
__lowerCamelCase , data_files=__lowerCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
_snake_case = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_snake_case = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_snake_case = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_snake_case = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_snake_case = [f'''ending{i}''' for i in range(4 )]
_snake_case = '''sent1'''
_snake_case = '''sent2'''
if data_args.max_seq_length is None:
_snake_case = tokenizer.model_max_length
if max_seq_length > 10_24:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `max_seq_length` value'''
''' of 1024. If you would like to use a longer `max_seq_length` up to `tokenizer.model_max_length` you can'''
''' override this default with `--max_seq_length xxx`.''' )
_snake_case = 10_24
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
_snake_case = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__lowerCamelCase : Any ):
_snake_case = [[context] * 4 for context in examples[context_name]]
_snake_case = examples[question_header_name]
_snake_case = [
[f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(__lowerCamelCase )
]
# Flatten out
_snake_case = list(chain(*__lowerCamelCase ) )
_snake_case = list(chain(*__lowerCamelCase ) )
# Tokenize
_snake_case = tokenizer(
__lowerCamelCase , __lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__lowerCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
_snake_case = raw_datasets['''train''']
if data_args.max_train_samples is not None:
_snake_case = min(len(__lowerCamelCase ) , data_args.max_train_samples )
_snake_case = train_dataset.select(range(__lowerCamelCase ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
_snake_case = train_dataset.map(
__lowerCamelCase , batched=__lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
_snake_case = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
_snake_case = min(len(__lowerCamelCase ) , data_args.max_eval_samples )
_snake_case = eval_dataset.select(range(__lowerCamelCase ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
_snake_case = eval_dataset.map(
__lowerCamelCase , batched=__lowerCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
_snake_case = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=__lowerCamelCase , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(__lowerCamelCase : Dict ):
_snake_case , _snake_case = eval_predictions
_snake_case = np.argmax(__lowerCamelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
_snake_case = Trainer(
model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__lowerCamelCase , data_collator=__lowerCamelCase , compute_metrics=__lowerCamelCase , )
# Training
if training_args.do_train:
_snake_case = None
if training_args.resume_from_checkpoint is not None:
_snake_case = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_snake_case = last_checkpoint
_snake_case = trainer.train(resume_from_checkpoint=__lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
_snake_case = train_result.metrics
_snake_case = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__lowerCamelCase )
)
_snake_case = min(__lowerCamelCase , len(__lowerCamelCase ) )
trainer.log_metrics('''train''' , __lowerCamelCase )
trainer.save_metrics('''train''' , __lowerCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_snake_case = trainer.evaluate()
_snake_case = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__lowerCamelCase )
_snake_case = min(__lowerCamelCase , len(__lowerCamelCase ) )
trainer.log_metrics('''eval''' , __lowerCamelCase )
trainer.save_metrics('''eval''' , __lowerCamelCase )
_snake_case = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**__lowerCamelCase )
else:
trainer.create_model_card(**__lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase : Optional[Any] ) -> Union[str, Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 224
|
"""simple docstring"""
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _UpperCAmelCase ( __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple=True , __lowerCamelCase : int="pt" ) -> int:
_snake_case = {'''add_prefix_space''': True} if isinstance(__lowerCamelCase , __lowerCamelCase ) and not line.startswith(''' ''' ) else {}
_snake_case = padding_side
return tokenizer(
[line] , max_length=__lowerCamelCase , padding='''max_length''' if pad_to_max_length else None , truncation=__lowerCamelCase , return_tensors=__lowerCamelCase , add_special_tokens=__lowerCamelCase , **__lowerCamelCase , )
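# A hedged note (added for clarity, not in the original file): encode_line tokenizes a
# single source or target line with optional max-length padding, e.g. with a
# hypothetical `tokenizer` instance:
#
#   >>> batch = encode_line(tokenizer, "a source sentence", 32, "right")
#   >>> batch["input_ids"].shape
#   torch.Size([1, 32])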
def _UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : Any , __lowerCamelCase : List[Any]=None , ) -> Any:
_snake_case = input_ids.ne(__lowerCamelCase ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
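# A small illustration (added for clarity, not in the original file) of trim_batch
# above: columns that equal the pad token id in every row of the batch are dropped.
#
#   >>> input_ids = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
#   >>> trim_batch(input_ids, 0)
#   tensor([[5, 6],
#           [7, 0]])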
class lowerCAmelCase__ ( A_ ):
def __init__( self : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Dict , _lowerCamelCase : int , _lowerCamelCase : Any , _lowerCamelCase : str="train" , _lowerCamelCase : List[str]=None , _lowerCamelCase : List[str]=None , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Union[str, Any]="" , ):
super().__init__()
_snake_case = Path(_lowerCamelCase ).joinpath(type_path + '''.source''' )
_snake_case = Path(_lowerCamelCase ).joinpath(type_path + '''.target''' )
_snake_case = self.get_char_lens(self.src_file )
_snake_case = max_source_length
_snake_case = max_target_length
assert min(self.src_lens ) > 0, f'''found empty line in {self.src_file}'''
_snake_case = tokenizer
_snake_case = prefix
if n_obs is not None:
_snake_case = self.src_lens[:n_obs]
_snake_case = src_lang
_snake_case = tgt_lang
def __len__( self : List[str] ):
return len(self.src_lens )
def __getitem__( self : str , _lowerCamelCase : List[str] ):
_snake_case = index + 1 # linecache starts at 1
_snake_case = self.prefix + linecache.getline(str(self.src_file ) , _lowerCamelCase ).rstrip('''\n''' )
_snake_case = linecache.getline(str(self.tgt_file ) , _lowerCamelCase ).rstrip('''\n''' )
assert source_line, f'''empty source line for index {index}'''
assert tgt_line, f'''empty tgt line for index {index}'''
# Need to add eos token manually for T5
if isinstance(self.tokenizer , _lowerCamelCase ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
_snake_case = (
self.tokenizer.question_encoder if isinstance(self.tokenizer , _lowerCamelCase ) else self.tokenizer
)
_snake_case = self.tokenizer.generator if isinstance(self.tokenizer , _lowerCamelCase ) else self.tokenizer
_snake_case = encode_line(_lowerCamelCase , _lowerCamelCase , self.max_source_length , '''right''' )
_snake_case = encode_line(_lowerCamelCase , _lowerCamelCase , self.max_target_length , '''right''' )
_snake_case = source_inputs['''input_ids'''].squeeze()
_snake_case = target_inputs['''input_ids'''].squeeze()
_snake_case = source_inputs['''attention_mask'''].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def lowercase ( _lowerCamelCase : Optional[Any] ):
return [len(_lowerCamelCase ) for x in Path(_lowerCamelCase ).open().readlines()]
def lowercase ( self : int , _lowerCamelCase : int ):
_snake_case = torch.stack([x['''input_ids'''] for x in batch] )
_snake_case = torch.stack([x['''attention_mask'''] for x in batch] )
_snake_case = torch.stack([x['''decoder_input_ids'''] for x in batch] )
_snake_case = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer , _lowerCamelCase )
else self.tokenizer.pad_token_id
)
_snake_case = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer , _lowerCamelCase )
else self.tokenizer.pad_token_id
)
_snake_case = trim_batch(_lowerCamelCase , _lowerCamelCase )
_snake_case , _snake_case = trim_batch(_lowerCamelCase , _lowerCamelCase , attention_mask=_lowerCamelCase )
_snake_case = {
'''input_ids''': source_ids,
'''attention_mask''': source_mask,
'''decoder_input_ids''': y,
}
return batch
UpperCAmelCase__ = getLogger(__name__)
def _UpperCAmelCase ( __lowerCamelCase : List[List] ) -> Any:
return list(itertools.chain.from_iterable(__lowerCamelCase ) )
def _UpperCAmelCase ( __lowerCamelCase : str ) -> None:
_snake_case = get_git_info()
save_json(__lowerCamelCase , os.path.join(__lowerCamelCase , '''git_log.json''' ) )
def _UpperCAmelCase ( __lowerCamelCase : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any]=4 , **__lowerCamelCase : Union[str, Any] ) -> str:
with open(__lowerCamelCase , '''w''' ) as f:
json.dump(__lowerCamelCase , __lowerCamelCase , indent=__lowerCamelCase , **__lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> Union[str, Any]:
with open(__lowerCamelCase ) as f:
return json.load(__lowerCamelCase )
def _UpperCAmelCase ( ) -> str:
_snake_case = git.Repo(search_parent_directories=__lowerCamelCase )
_snake_case = {
'''repo_id''': str(__lowerCamelCase ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
'''hostname''': str(socket.gethostname() ),
}
return repo_infos
def _UpperCAmelCase ( __lowerCamelCase : Callable , __lowerCamelCase : Iterable ) -> List:
return list(map(__lowerCamelCase , __lowerCamelCase ) )
def _UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : Tuple ) -> Tuple:
with open(__lowerCamelCase , '''wb''' ) as f:
return pickle.dump(__lowerCamelCase , __lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> Optional[int]:
def remove_articles(__lowerCamelCase : Tuple ):
return re.sub(R'''\b(a|an|the)\b''' , ''' ''' , __lowerCamelCase )
def white_space_fix(__lowerCamelCase : int ):
return " ".join(text.split() )
def remove_punc(__lowerCamelCase : List[Any] ):
_snake_case = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCamelCase : List[str] ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCamelCase ) ) ) )
def _UpperCAmelCase ( __lowerCamelCase : int , __lowerCamelCase : List[Any] ) -> Any:
_snake_case = normalize_answer(__lowerCamelCase ).split()
_snake_case = normalize_answer(__lowerCamelCase ).split()
_snake_case = Counter(__lowerCamelCase ) & Counter(__lowerCamelCase )
_snake_case = sum(common.values() )
if num_same == 0:
return 0
_snake_case = 1.0 * num_same / len(__lowerCamelCase )
_snake_case = 1.0 * num_same / len(__lowerCamelCase )
_snake_case = (2 * precision * recall) / (precision + recall)
return fa
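# A worked example (added for clarity, not in the original file): for the prediction
# "hello there world" against the reference "hello world", the normalized token
# overlap is {"hello", "world"}, so num_same = 2, precision = 2/3, recall = 2/2 = 1,
# and F1 = (2 * 2/3 * 1) / (2/3 + 1) = 0.8.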
def _UpperCAmelCase ( __lowerCamelCase : str , __lowerCamelCase : int ) -> Any:
return normalize_answer(__lowerCamelCase ) == normalize_answer(__lowerCamelCase )
def _UpperCAmelCase ( __lowerCamelCase : List[str] , __lowerCamelCase : List[str] ) -> Dict:
assert len(__lowerCamelCase ) == len(__lowerCamelCase )
_snake_case = 0
for hypo, pred in zip(__lowerCamelCase , __lowerCamelCase ):
em += exact_match_score(__lowerCamelCase , __lowerCamelCase )
if len(__lowerCamelCase ) > 0:
em /= len(__lowerCamelCase )
return {"em": em}
def _UpperCAmelCase ( __lowerCamelCase : Tuple ) -> Optional[int]:
return model_prefix.startswith('''rag''' )
def _UpperCAmelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
_snake_case = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
_snake_case = '''dropout_rate'''
for p in extra_params:
if getattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
if not hasattr(__lowerCamelCase , __lowerCamelCase ) and not hasattr(__lowerCamelCase , equivalent_param[p] ):
logger.info('''config doesn\'t have a `{}` attribute'''.format(__lowerCamelCase ) )
delattr(__lowerCamelCase , __lowerCamelCase )
continue
_snake_case = p if hasattr(__lowerCamelCase , __lowerCamelCase ) else equivalent_param[p]
setattr(__lowerCamelCase , __lowerCamelCase , getattr(__lowerCamelCase , __lowerCamelCase ) )
delattr(__lowerCamelCase , __lowerCamelCase )
return hparams, config
| 224
| 1
|
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
SCREAMING_SNAKE_CASE : str = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
SCREAMING_SNAKE_CASE : Dict = logging.WARNING
def UpperCamelCase ( ) -> Tuple:
'''simple docstring'''
lowercase_ :int = os.getenv('''DATASETS_VERBOSITY''' , _a )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
f"has to be one of: { ', '.join(log_levels.keys() ) }" )
return _default_log_level
def UpperCamelCase ( ) -> str:
'''simple docstring'''
return __name__.split('''.''' )[0]
def UpperCamelCase ( ) -> logging.Logger:
'''simple docstring'''
return logging.getLogger(_get_library_name() )
def UpperCamelCase ( ) -> None:
'''simple docstring'''
lowercase_ :Dict = _get_library_root_logger()
library_root_logger.setLevel(_get_default_logging_level() )
def UpperCamelCase ( ) -> None:
'''simple docstring'''
lowercase_ :List[str] = _get_library_root_logger()
library_root_logger.setLevel(logging.NOTSET )
def UpperCamelCase ( _a = None ) -> logging.Logger:
'''simple docstring'''
if name is None:
lowercase_ :Union[str, Any] = _get_library_name()
return logging.getLogger(_a )
def UpperCamelCase ( ) -> int:
'''simple docstring'''
return _get_library_root_logger().getEffectiveLevel()
def UpperCamelCase ( _a ) -> None:
'''simple docstring'''
_get_library_root_logger().setLevel(_a )
def UpperCamelCase ( ) -> Optional[int]:
'''simple docstring'''
return set_verbosity(_a )
def UpperCamelCase ( ) -> List[Any]:
'''simple docstring'''
return set_verbosity(_a )
def UpperCamelCase ( ) -> int:
'''simple docstring'''
return set_verbosity(_a )
def UpperCamelCase ( ) -> Dict:
'''simple docstring'''
return set_verbosity(_a )
def UpperCamelCase ( ) -> None:
'''simple docstring'''
lowercase_ :int = False
def UpperCamelCase ( ) -> None:
'''simple docstring'''
lowercase_ :Optional[int] = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    """Dummy tqdm that doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return an empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    """Return whether tqdm progress bars are enabled."""
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    """Enable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    """Disable tqdm progress bars."""
    global _tqdm_active
    _tqdm_active = False
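

if __name__ == "__main__":
    # Smoke test (added for illustration; not part of the original module).
    logging.basicConfig()  # give the root logger a handler so records are visible
    set_verbosity_info()
    demo_logger = get_logger()
    demo_logger.info("logging helpers are wired up")
    disable_progress_bar()
    assert not is_progress_bar_enabled()
    enable_progress_bar()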
def sylvester(number: int) -> int:
    """Return the `number`-th term of Sylvester's sequence (1-indexed, sylvester(1) == 2)."""
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(f"The 8th number in Sylvester's sequence: {sylvester(8)}")
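
    # Illustration (added): the recurrence above computes s(n) = s(n-1)**2 - s(n-1) + 1,
    # since (s(n-1) - 1) * s(n-1) + 1 expands to exactly that.
    assert [sylvester(n) for n in range(1, 6)] == [2, 3, 7, 43, 1807]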
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt',
},
'tokenizer_file': {
'google/realm-cc-news-pretrained-embedder': (
            'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-openqa': (
'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-reader': (
'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-openqa': (
'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-reader': (
'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/realm-cc-news-pretrained-embedder': 512,
'google/realm-cc-news-pretrained-encoder': 512,
'google/realm-cc-news-pretrained-scorer': 512,
'google/realm-cc-news-pretrained-openqa': 512,
'google/realm-orqa-nq-openqa': 512,
'google/realm-orqa-nq-reader': 512,
'google/realm-orqa-wq-openqa': 512,
'google/realm-orqa-wq-reader': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True},
'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-reader': {'do_lower_case': True},
'google/realm-orqa-wq-openqa': {'do_lower_case': True},
'google/realm-orqa-wq-reader': {'do_lower_case': True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" REALM tokenizer (backed by HuggingFace's *tokenizers* library),
    based on WordPiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def batch_encode_candidates(self, text, **kwargs):
        # Always use a fixed sequence length so the candidates can be stacked into a batch.
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
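

# --- Usage sketch (added; not part of the original file). It assumes the
# `google/realm-cc-news-pretrained-encoder` checkpoint is reachable, so it
# needs network access and is illustrative only. ---
#
# tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
# # One row per question, one column per candidate; everything is padded to
# # max_length so the candidates stack into one (batch, num_candidates, seq_len) tensor.
# batch = tokenizer.batch_encode_candidates(
#     [["candidate one", "candidate two"]], max_length=10, return_tensors="pt"
# )
# print(batch["input_ids"].shape)  # torch.Size([1, 2, 10])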
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    """Close stale issues and reopen issues with fresh human activity."""
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """
    Handles arguments for zero-shot classification: parses the candidate labels and
    builds one (premise, hypothesis) pair per label.
    """

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels

    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """
    NLI-based zero-shot classification pipeline using a model fine-tuned on NLI tasks.
    """

    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )

    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate to a value that is not
                # even reached by the input. In that case we don't want to truncate, and
                # there seems to be no better way to catch that exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params

    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }

    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs

    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
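

# Worked sketch of the postprocess math above (added for illustration; plain
# numpy with fake NLI logits, no model required). With multi_label=True each
# label is scored by a softmax over its own (contradiction, entailment) pair.
if __name__ == "__main__":
    fake_logits = np.array([[[0.1, 0.2, 2.0], [1.5, 0.3, 0.2]]])  # (1 seq, 2 labels, 3 classes)
    entail_contr = fake_logits[..., [0, 2]]  # keep (contradiction, entailment)
    probs = np.exp(entail_contr) / np.exp(entail_contr).sum(-1, keepdims=True)
    print(probs[..., 1])  # independent entailment probability per label, each in [0, 1]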
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Load a vocabulary file into an ordered dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Greedy longest-match-first: shrink the window until a vocab entry matches.
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end

        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string with jieba, then refine each chunk with the WordPiece tokenizer."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Decode ids into a string, dropping padding and sequence-boundary tokens."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))

        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
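

# Toy walkthrough (added; the vocab below is made up, not the real CPM-Ant one):
# the WordpieceTokenizer above scans left to right, always taking the longest
# vocabulary entry that matches.
if __name__ == "__main__":
    toy = WordpieceTokenizer(vocab={"un", "believ", "able", "unbeliev"}, unk_token="<unk>")
    print(toy.tokenize("unbelievable"))  # ['unbeliev', 'able']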
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Build and simulate an n-qubit quantum Fourier transform circuit."""
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)


if __name__ == "__main__":
    print(
        f"Total count for quantum fourier transform state is: {quantum_fourier_transform(3)}"
    )
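
    # Interpretation note (added): starting from |0...0>, the QFT produces a uniform
    # superposition, so each of the 2**n bitstrings should appear with probability
    # close to 1 / 2**n across the 10000 shots.
    counts = quantum_fourier_transform(3)
    assert len(counts) == 2**3  # all 8 outcomes observed with overwhelming probability
    print({state: shots / 10_000 for state, shots in counts.items()})  # each near 1/8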
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267, max_position_embeddings=1024,
        encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16,
        decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0,
        activation_function="gelu", d_model=1024,
        dropout=0.1, attention_dropout=0.0, activation_dropout=0.0,
        init_std=0.02, classifier_dropout=0.0,
        scale_embedding=False, use_cache=True,
        pad_token_id=1, bos_token_id=0, eos_token_id=2,
        is_encoder_decoder=True, decoder_start_token_id=2, forced_eos_token_id=2,
        use_prompt=False, prompt_length=100, prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
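

# Usage sketch (added; not part of the original file): configs are plain
# constructors, so a small config can be built offline from keyword overrides.
if __name__ == "__main__":
    config = MvpConfig(encoder_layers=2, decoder_layers=2, d_model=64)
    print(config.model_type, config.hidden_size)  # "mvp" 64  (hidden_size maps to d_model)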
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
'''simple docstring'''
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)


class ByT5Tokenizer(PreTrainedTokenizer):
    """
    Construct a ByT5 tokenizer. ByT5 simply uses raw bytes utf-8 encoding.
    """

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def _add_eos_if_not_present(self, token_ids):
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def _tokenize(self, text):
        """Take a string as input and return a list of single-character byte tokens."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        """Convert an index (integer) to a token (str) using the vocab."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens (string) to a single string."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        return ()
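

# Byte-level round trip (added for illustration; runs offline because the
# "vocabulary" is just the 256 byte values plus a few special tokens).
if __name__ == "__main__":
    tok = ByT5Tokenizer()
    # 'h' (byte 104) and 'i' (byte 105) are shifted by the 3 special tokens,
    # and an eos (</s>, id 1) is appended.
    print(tok("hi").input_ids)  # [107, 108, 1]
    print(tok.convert_tokens_to_string(tok._tokenize("hi")))  # hi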
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """Wrapper around `tqdm.tqdm` that optionally displays only on the main process."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Show the bar only on the local main process; disable it everywhere else.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
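

# Usage sketch (added; not part of the original module). In a script launched
# with `accelerate launch`, only local rank 0 renders the bar. Note that
# `main_process_only` is the first positional argument:
#
#     from accelerate.utils import tqdm
#
#     for step in tqdm(True, range(1000)):
#         ...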
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """Perform a few sanity checks on the arguments."""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    """Parse arguments, then set up and run the distillation."""
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")

    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )

    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )

    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")

    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )

    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")

    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )

    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )

    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")

    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")

    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
main()
'''simple docstring'''
import sys
def matrix_chain_order(array):
    """Dynamic-programming solution to the matrix chain multiplication problem."""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optiomal_solution(optimal_solution, i, j):
    """Print the optimal parenthesization encoded in `optimal_solution`."""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optiomal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optiomal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optiomal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
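
    # Sanity check (added): the classic CLRS instance above has optimal cost 15125
    # with parenthesization ((A1(A2A3))((A4A5)A6)).
    assert matrix_chain_order([30, 35, 15, 5, 10, 20, 25])[0][1][6] == 15125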
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test `_convert_token_to_id` and `_convert_id_to_token`."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @cached_property
    def big_tokenizer(self):
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
3293,
83,
10,
4552,
4989,
7986,
678,
10,
5915,
111,
17_9459,
12_4850,
4,
6044,
237,
12,
6,
5,
6,
4,
6780,
705,
15,
1388,
44,
378,
1_0114,
711,
152,
20,
6,
5,
2_2376,
642,
1221,
1_5190,
3_4153,
450,
5608,
959,
1119,
5_7702,
136,
186,
47,
1098,
2_9367,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
6044,
237,
6284,
5_0901,
528,
31,
90,
34,
927,
2,
]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')  # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
# fmt: off
__a = {'input_ids': [[0, 1_1062, 8_2772, 7, 15, 8_2772, 538, 5_1529, 237, 1_7198, 1290, 206, 9, 21_5175, 1314, 136, 1_7198, 1290, 206, 9, 5_6359, 42, 12_2009, 9, 1_6466, 16, 8_7344, 4537, 9, 4717, 7_8381, 6, 15_9958, 7, 15, 2_4480, 618, 4, 527, 2_2693, 5428, 4, 2777, 2_4480, 9874, 4, 4_3523, 594, 4, 803, 1_8392, 3_3189, 18, 4, 4_3523, 2_4447, 1_2399, 100, 2_4955, 8_3658, 9626, 14_4057, 15, 839, 2_2335, 16, 136, 2_4955, 8_3658, 8_3479, 15, 3_9102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 12_2009, 11_5774, 23, 805, 1328, 4_6876, 7, 136, 5_3894, 1940, 4_2227, 4_1159, 1_7721, 823, 425, 4, 2_7512, 9_8722, 206, 136, 5531, 4970, 919, 1_7336, 5, 2], [0, 2_0080, 618, 83, 8_2775, 47, 479, 9, 1517, 73, 5_3894, 333, 8_0581, 11_0117, 1_8811, 5256, 1295, 51, 15_2526, 297, 7986, 390, 12_4416, 538, 3_5431, 214, 98, 1_5044, 2_5737, 136, 7108, 4_3701, 23, 756, 13_5355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 581, 6_3773, 11_9455, 6, 14_7797, 8_8203, 7, 645, 70, 21, 3285, 1_0269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
            expected_encoding=__a, model_name='xlm-roberta-base', revision='d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3', )
| 270
| 1
|
"""simple docstring"""
def generate_large_matrix() -> list[list[int]]:
    """Generate a 1000x1000 grid sorted in decreasing order along rows and columns."""
    return [list(range(1000 - i, -1000 - i, -1)) for i in range(1000)]
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)
def validate_grid(grid: list[list[int]]) -> None:
    """Validate that every row and every column is sorted in decreasing order."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))
def find_negative_index(array: list[int]) -> int:
    """Binary-search a decreasingly sorted array for the index of its first negative number."""
    left = 0
    right = len(array) - 1
    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0
    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]
        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)
def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives using find_negative_index, shrinking the search bound row by row."""
    total = 0
    bound = len(grid[0])
    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total
def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by checking every value in the grid."""
    return len([number for row in grid for number in row if number < 0])
def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Count negatives row by row, breaking at the first negative in each sorted row."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total
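# Editor's sketch (not part of the original module): the three implementations should
# agree on every sorted test grid defined above; `_example_agreement_check` is a
# hypothetical helper name added for illustration.
def _example_agreement_check() -> None:
    for g in test_grids:
        assert (
            count_negatives_binary_search(g)
            == count_negatives_brute_force(g)
            == count_negatives_brute_force_with_break(g)
        )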
def benchmark() -> None:
    """Benchmark the three counting implementations on the large grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 575
|
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    """simple docstring"""
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f'Max diff is {diff}')
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f'Max diff is {diff}')
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ], axis=-1,
        )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
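# Illustrative usage (editor's sketch; `DummyConfig` is a stand-in, not part of the test file):
#   class DummyConfig: pad_token_id = 1
#   batch = prepare_pegasus_inputs_dict(DummyConfig(), np.array([[5, 4, 1]]), np.array([[2, 5, 1]]))
#   batch["attention_mask"]         -> [[1, 1, 0]]
#   batch["decoder_attention_mask"] -> [[1, 1, 0]]  (first decoder token is always attended)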
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)
    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])
                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
        src_text = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
        tgt_text = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
| 575
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22Pipeline
    params = [
        "image_embeds",
        "negative_image_embeds",
    ]
    batch_params = ["image_embeds", "negative_image_embeds"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def block_out_channels_0(self):
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
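    # Editor's note (sketch): `movq` is the VQ image decoder used by Kandinsky 2.2; the
    # pipeline denoises in its 4-channel latent space, which is why the dummy UNet above
    # uses in_channels=4 and out_channels=8 (mean and variance are both predicted).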
    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_text2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy")
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22Pipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = "red cat, 4k photo"
        generator = torch.Generator(device="cuda").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()
        generator = torch.Generator(device="cuda").manual_seed(0)
        output = pipeline(
            image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
| 690
|
"""simple docstring"""
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2
def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Solve Coulomb's law F = k * q1 * q2 / d^2 for whichever argument is given as 0."""
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
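# Illustrative calls (editor's sketch, assuming the restored signature above):
# coulombs_law(force=0, charge1=3, charge2=5, distance=2000)  -> {'force': 33705.0}
# coulombs_law(force=10, charge1=3, charge2=5, distance=0)    -> {'distance': 116112.01...}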
if __name__ == "__main__":
import doctest
doctest.testmod()
| 690
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 536
|
'''simple docstring'''
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Check that no neighbour of the current vertex already uses the candidate color."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours))
def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursively try to color vertex `index` with each of the available colors."""
    # Base Case
    if index == len(graph):
        return True
    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring with at most `max_colors` colors, or [] if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
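# Illustrative usage (editor's sketch): a 5-vertex graph given as an adjacency matrix,
# colored with at most 3 colors; the backtracking search returns one valid assignment,
# e.g. [0, 1, 2, 0, 0] for this graph.
# graph = [
#     [0, 1, 1, 0, 0],
#     [1, 0, 1, 0, 1],
#     [1, 1, 0, 1, 0],
#     [0, 0, 1, 0, 0],
#     [0, 1, 0, 0, 0],
# ]
# color(graph, 3)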
| 536
| 1
|
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=3_8_4,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=1_2_8,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=2_0,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=3_0,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=4_2, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
lowerCamelCase : Tuple = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
lowerCamelCase : int = 1 << 5_0
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
lowerCamelCase : Optional[int] = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
lowerCamelCase : Optional[Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs['input_ids'], dtype=np.int32)
    attention_mask = np.asarray(inputs['attention_mask'], dtype=np.int32)
    token_type_ids = np.asarray(inputs['token_type_ids'], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle)
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
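# Illustrative note (editor's sketch, assuming the standard 3-input QA ONNX export):
# the engine bindings are ordered [input_ids, attention_mask, token_type_ids,
# start_logits, end_logits], which is why get_binding_shape(3) and (4) later address
# the two output buffers.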
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['validation'].column_names
question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
F"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation='only_second' if pad_on_right else 'only_first', max_length=max_seq_length, stride=args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding='max_length', )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop('overflow_to_sample_mapping')
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples['example_id'] = []
    for i in range(len(tokenized_examples['input_ids'])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples['example_id'].append(examples['id'][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples['offset_mapping'][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples['offset_mapping'][i])
        ]
    return tokenized_examples
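# Illustrative note (editor's sketch): with max_seq_length=384 and doc_stride=128, a
# question over a ~600-token context yields two overlapping features, and
# overflow_to_sample_mapping would read [0, 0], tying both features back to example 0.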
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage='eval'):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples, features=features, predictions=predictions, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=stage, )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{'id': k, 'prediction_text': v} for k, v in predictions.items()]
    references = [{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffers for the start and end logits.
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(F""" Num examples = {len(eval_dataset)}""")
logger.info(F""" Batch size = {args.per_device_eval_batch_size}""")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0))
logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"""Evaluation metrics: {eval_metric}""")
| 718
|
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)
name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names
def add_arguments(parser):
    group = parser.add_argument_group('quant_trainer arguments')
    group.add_argument('--wprec', type=int, default=8, help='weight precision')
    group.add_argument('--aprec', type=int, default=8, help='activation precision')
    group.add_argument('--quant-per-tensor', action='store_true', help='per tensor weight scaling')
    group.add_argument('--quant-disable', action='store_true', help='disable all quantizers')
    group.add_argument('--quant-disable-embeddings', action='store_true', help='disable all embeddings quantizers')
    group.add_argument('--quant-disable-keyword', type=str, nargs='+', help='disable quantizers by keyword')
    group.add_argument('--quant-disable-layer-module', type=str, help='disable quantizers by keyword under layer.')
    group.add_argument('--quant-enable-layer-module', type=str, help='enable quantizers by keyword under layer')
    group.add_argument('--calibrator', default='max', help='which quantization range calibrator to use')
    group.add_argument('--percentile', default=None, type=float, help='percentile for PercentileCalibrator')
    group.add_argument('--fuse-qkv', action='store_true', help='use the same scale factor for qkv')
    group.add_argument('--clip-gelu', metavar='N', type=float, help='clip gelu output maximum value to N')
    group.add_argument(
        '--recalibrate-weights', action='store_true', help=(
            'recalibrate weight amaxes by taking the max of the weights.'
            ' amaxes will be computed with the current quantization granularity (axis).'
        ), )
def set_default_quantizers(args):
    if args.calibrator == "max":
        calib_method = 'max'
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError('Specify --percentile when using percentile calibrator')
        calib_method = 'histogram'
    elif args.calibrator == "mse":
        calib_method = 'histogram'
    else:
        raise ValueError(f'Invalid calibrator {args.calibrator}')
    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
def configure_model(model, args, calib=False, eval=False):
    logger.info('Configuring Model for Quantization')
    logger.info(f'using quantization package {pytorch_quantization.__file__}')
    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ['embeddings'], which='weight', _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [''], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [R'layer.\d+.' + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [R'layer.\d+.' + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)
        # if args.local_rank in [-1, 0] and not calib:
        print_quant_summary(model)
def enable_calibration(model):
    logger.info('Enabling Calibration')
    for name, module in model.named_modules():
        if name.endswith('_quantizer'):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f'{name:80}: {module}')
def finish_calibration(model, args):
    logger.info('Loading calibrated amax')
    for name, module in model.named_modules():
        if name.endswith('_quantizer'):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax('percentile', percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)
def fuse_qkv(model, args):
    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, '_amax'):
                print('          WARNING: NO AMAX BUFFER')
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()
        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f'          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}')

    for name, mod in model.named_modules():
        if name.endswith('.attention.self'):
            logger.info(f'FUSE_QKV: {name:{name_width}}')
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)
def clip_gelu(model, maxval):
    for name, mod in model.named_modules():
        if name.endswith('.output.dense') and not name.endswith('attention.output.dense'):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}')
def expand_amax(model):
    """Expand per-tensor amax to per-channel amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, '_weight_quantizer') and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f'expanding {name} {amax} -> {mod._weight_quantizer._amax}')
def recalibrate_weights(model):
    for name, mod in model.named_modules():
        if hasattr(mod, '_weight_quantizer'):
            if not hasattr(mod.weight_quantizer, '_amax'):
                print(f'RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER')
                continue
            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}')
            mod._weight_quantizer._amax = amax
def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]
    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, 'weight'):
            continue
        name_width = max(name_width, len(name))
    for name, mod in model.named_modules():
        input_q = getattr(mod, '_input_quantizer', None)
        weight_q = getattr(mod, '_weight_quantizer', None)
        if not hasattr(mod, 'weight'):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f'Act:{input_q.extra_repr()}'
        wgt_str = f'Wgt:{weight_q.extra_repr()}'
        s = f'{name:{name_width}} {act_str} {wgt_str}'
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f'{name:{name_width}} {act_str}')
            logger.info(f'{" ":{name_width}} {wgt_str}')
def print_quant_summary(model):
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f'{name:80} {mod}')
            count += 1
    print(f'{count} TensorQuantizers found in model')
def set_quantizer(name, mod, quantizer, k, v):
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f'{name} has no {quantizer}')
def set_quantizers(name, mod, which='both', **kwargs):
    s = f'Warning: changing {which} quantizers of {name:{qname_width}}'
    for k, v in kwargs.items():
        s += f' {k}={v}'
        if which in ["input", "both"]:
            set_quantizer(name, mod, '_input_quantizer', k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, '_weight_quantizer', k, v)
    logger.info(s)
def set_quantizer_by_name(model, names, **kwargs):
    for name, mod in model.named_modules():
        if hasattr(mod, '_input_quantizer') or hasattr(mod, '_weight_quantizer'):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith('_quantizer'):
            for n in names:
                if re.search(n, name):
                    s = f'Warning: changing {name:{name_width}}'
                    for k, v in kwargs.items():
                        s += f' {k}={v}'
                        setattr(mod, k, v)
                    logger.info(s)
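# Illustrative usage (editor's sketch, not part of the original module):
# set_quantizer_by_name(model, [r"encoder.layer.\d+.output"], _disabled=True)
# disables both the input and weight quantizers of every module whose name matches.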
| 303
| 0
|
'''simple docstring'''
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """simple docstring"""
    def __init__(self, device: str = "cpu", clip_model: str = "openai/clip-vit-large-patch14") -> None:
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48145466, 0.4578275, 0.40821073]
        self.image_std = [0.26862954, 0.26130258, 0.27577711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)
    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images
    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding
class VQGAN_CLIP(nn.Module):
    """simple docstring"""
    def __init__(self, iterations=10, lr=0.01, vqgan=None, vqgan_config=None, vqgan_checkpoint=None, clip=None, clip_preprocessor=None, device=None, log=False, save_vector=True, return_val="image", quantize=True, save_intermediate=False, show_intermediate=False, make_grid=False, ) -> None:
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)
        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape
    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)")
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")
    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        img = preprocess(Image.open(path), target_image_size=256).to(self.device)
        img = preprocess_vqgan(img)
        z, *_ = self.vqgan.encode(img)
        return z
    def _add_vector(self, transform_vector):
        """Add a transform vector to the base latent and decode the result."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)
    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()
    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss
    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector
    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log({"Original Image": wandb.Image(image)})
    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }
    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Generate an image; if `image_path` is given it is the optimization's start point."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
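# Usage sketch (hypothetical prompts/paths; assumes VQGAN weights are available to
# load_vqgan, and uses the "text:weight" prompt syntax handled by process_prompts):
# editor = VQGAN_CLIP(iterations=20, lr=0.05)
# editor.generate(
#     pos_prompts="a smiling face:1.0|bright eyes:0.5",
#     neg_prompts="blurry:0.5",
#     image_path="./input/face.png",
#     save_intermediate=True,
# )
# editor.make_animation(total_duration=5)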
| 127
|
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import f1_score
import datasets
_CITATION = "\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n"
_DESCRIPTION = "\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n"
_KWARGS_DESCRIPTION = "\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"precision\": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'precision@10': 1.0}\n\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def precision_at_10(en_sentvecs, in_sentvecs):
    en_sentvecs = np.array(en_sentvecs)
    in_sentvecs = np.array(in_sentvecs)
    n = en_sentvecs.shape[0]

    # mean centering
    en_sentvecs = en_sentvecs - np.mean(en_sentvecs, axis=0)
    in_sentvecs = in_sentvecs - np.mean(in_sentvecs, axis=0)

    sim = cdist(in_sentvecs, en_sentvecs, "cosine")
    actual = np.array(range(n))
    preds = sim.argsort(axis=1)[:, :10]
    matches = np.any(preds == actual[:, None], axis=1)
    return float(matches.mean())
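# Retrieval sketch (hand-checked): with identical English/Indic vector sets, every
# row's own index lands among its top-10 cosine neighbours, so the score is perfect:
# precision_at_10([[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]],
#                 [[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]])  # -> 1.0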
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class IndicGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
    def _compute(self, predictions, references):
        if self.config_name == "cvit-mkb-clsr":
            return {"precision@10": precision_at_10(predictions, references)}
        elif self.config_name in ["wiki-ner"]:
            return acc_and_f1(predictions, references)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
            return {"accuracy": simple_accuracy(predictions, references)}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
| 127
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_glpn''': ['''GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GLPNConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_glpn'''] = ['''GLPNFeatureExtractor''']
    _import_structure['''image_processing_glpn'''] = ['''GLPNImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_glpn'''] = [
'''GLPN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GLPNForDepthEstimation''',
'''GLPNLayer''',
'''GLPNModel''',
'''GLPNPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 61
|
'''simple docstring'''
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(1_0)}


def digits_fifth_powers_sum(number: int) -> int:
    """Sum of the fifth powers of the digits of `number`."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Sum of all numbers that equal the sum of the fifth powers of their digits."""
    return sum(
        number
        for number in range(10_00, 1_00_00_00)
        if number == digits_fifth_powers_sum(number)
    )
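# Worked example (hand-checked): 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150,
# so digits_fifth_powers_sum(4150) == 4150 and 4150 is one of the terms counted by solution().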
if __name__ == "__main__":
print(solution())
| 61
| 1
|
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    """Two parallel Transformer2DModels whose residuals are mixed for dual conditioning."""

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )
        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
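# Mixing sketch: each transformer sees only its slice of the condition tokens
# (77 tokens for the first condition, 257 for the second by default), and the two
# condition-specific residuals r0, r1 blend as
#     output = mix_ratio * r0 + (1 - mix_ratio) * r1 + hidden_states
# so a pipeline can re-weight the two conditions at inference time by setting
# `model.mix_ratio` (0.5 above weighs them equally).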
| 668
|
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True
    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]]
        )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 668
| 1
|
def exchange_sort(numbers: list[int]) -> list[int]:
    """Sort a list in place by exchanging out-of-order pairs, then return it."""
    numbers_length = len(numbers)
    for i in range(numbers_length):
        for j in range(i + 1, numbers_length):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(exchange_sort(unsorted))
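# Worked example (hand-checked): exchange_sort([5, 3, 1, 2]) returns [1, 2, 3, 5];
# after pass i the smallest remaining value sits at index i, so the list ends sorted.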
| 719
|
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    """Constructs a GLPN image processor."""

    model_input_names = ['''pixel_values''']

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError('size_divisor is required for resizing')

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError('Invalid image(s)')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 2_55) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
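# Usage sketch (hypothetical image; spatial dims are floored to a multiple of
# size_divisor=32 and pixel values rescaled by 1/255):
# import numpy as np
# processor = GLPNImageProcessor()
# image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
# batch = processor.preprocess(image, return_tensors="pt")  # pixel_values: (1, 3, 480, 640)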
| 657
| 0
|
def kth_permutation(k: int, n: int) -> list[int]:
    """Return the k-th (0-indexed) lexicographic permutation of [0, 1, ..., n - 1]."""
    # Factorials from 1! up to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])

    return permutation
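# Worked example (hand-checked): with n = 4 the factorials list is [1, 2, 6], so for
# k = 1: divmod(1, 6) picks element 0 of [0, 1, 2, 3], divmod(1, 2) picks element 0
# of [1, 2, 3], and divmod(1, 1) picks element 1 of [2, 3], giving
# kth_permutation(1, 4) == [0, 1, 3, 2].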
if __name__ == "__main__":
import doctest
doctest.testmod()
| 67
|
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """An adapter that makes logging safe in multi-process runs.

    By default, log lines are emitted on the main process only.
    """

    @staticmethod
    def _should_log(main_process_only):
        """Check whether this process should emit the log line."""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """Delegate a log call to the underlying logger, on the main process only by default.

        Pass `in_order=True` to have every process log one after the other instead.
        """
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    """Return a `MultiProcessAdapter`-wrapped logger, honouring ACCELERATE_LOG_LEVEL."""
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
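# Usage sketch (assumes an Accelerator/PartialState exists before the first log call):
# from accelerate import Accelerator
# accelerator = Accelerator()
# logger = get_logger(__name__, log_level="INFO")
# logger.info("emitted on the main process only")  # main_process_only defaults to True
# logger.info("emitted rank by rank", main_process_only=False, in_order=True)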
| 283
| 0
|
"""simple docstring"""
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    '''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''',
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = '''roberta'''

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    '''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. ''',
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = '''roberta'''

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
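# Usage sketch (hypothetical config/inputs): with output_layer >= 0 the matching
# highway (early-exit) classifier's logits replace the final head's in the outputs:
# model = DeeRobertaForSequenceClassification(config)
# outputs = model(input_ids, attention_mask=attention_mask, labels=labels, output_layer=3)
# loss, logits = outputs[0], outputs[1]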
| 705
|
"""simple docstring"""
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
    class CursorInfo(ctypes.Structure):
        # _fields is a specific attr expected by ctypes
        _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("""\033[?25l""")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("""\033[?25h""")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
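# Usage sketch: hide the cursor while drawing, restoring it even if rendering fails.
# with hide():
#     render_menu()  # hypothetical renderer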
| 173
| 0
|
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def simulated_annealing(
    search_prob,
    find_max: bool = True,
    max_x: float = math.inf,
    min_x: float = -math.inf,
    max_y: float = math.inf,
    min_y: float = -math.inf,
    visualization: bool = False,
    start_temperate: float = 100,
    rate_of_decrease: float = 0.01,
    threshold_temp: float = 1,
) -> Any:
    """Anneal from `search_prob`, accepting worse neighbours with probability e^(change / temp)."""
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None

    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)

        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state

    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel("Iterations")
        plt.ylabel("Function values")
        plt.show()
    return best_state
if __name__ == "__main__":
def lowercase__(A , A ) ->Dict:
"""simple docstring"""
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
a : List[Any] = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a : int = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
# starting the problem with initial coordinates (12, 47)
a : Tuple = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
a : List[str] = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"""The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 """
F"""and 50 > y > - 5 found via hill climbing: {local_min.score()}"""
)
def lowercase__(A , A ) ->int:
"""simple docstring"""
return (3 * x**2) - (6 * y)
a : Tuple = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a : List[str] = simulated_annealing(prob, find_max=False, visualization=True)
print(
"""The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
a : int = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
a : Union[str, Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
"""The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: """
F"""{local_min.score()}"""
)
| 218
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

_model_names = [
"""small""",
"""small-base""",
"""medium""",
"""medium-base""",
"""intermediate""",
"""intermediate-base""",
"""large""",
"""large-base""",
"""xlarge""",
"""xlarge-base""",
]
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json""",
"""funnel-transformer/small-base""": (
"""https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json""",
"""funnel-transformer/medium-base""": (
"""https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json""",
"""funnel-transformer/large-base""": (
"""https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json"""
),
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json""",
"""funnel-transformer/xlarge-base""": (
"""https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {F"""funnel-transformer/{name}""": 512 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {F"""funnel-transformer/{name}""": {"""do_lower_case""": True} for name in _model_names}


class FunnelTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" Funnel Transformer tokenizer, backed by the tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id: int = 2

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        bos_token="<s>",
        eos_token="</s>",
        clean_text=True,
        tokenize_chinese_chars=True,
        strip_accents=None,
        wordpieces_prefix="##",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            bos_token=bos_token,
            eos_token=eos_token,
            clean_text=clean_text,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            wordpieces_prefix=wordpieces_prefix,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Create token type IDs; the [CLS] token gets its own type `cls_token_type_id`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0]
        return len(cls) * [self.cls_token_type_id] + len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
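# Usage sketch: unlike BERT, the [CLS] position gets its own token type id (2):
# tokenizer = FunnelTokenizerFast.from_pretrained("funnel-transformer/small")
# tokenizer.create_token_type_ids_from_sequences([7, 8])  # -> [2, 0, 0, 0]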
| 218
| 1
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE with Heun's (modified Euler) predictor-corrector method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
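# Worked example (hand-checked) for y' = y, y(0) = 1 with step 0.1:
# y1 = 1 + 0.05 * (1 + 1.1) = 1.105 and y2 = 1.105 + 0.05 * (1.105 + 1.2155) = 1.221025,
# i.e. euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 0.2) -> array([1., 1.105, 1.221025])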
if __name__ == "__main__":
import doctest
doctest.testmod()
| 324
|
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def main() -> None:
    """Parse raw DPR training data into an evaluation set and a gold data file."""
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data', )
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file', )
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file', )
    args = parser.parse_args()

    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w') as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')
if __name__ == "__main__":
main()
| 324
| 1
|
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def get_swinva_config(swinva_name):
    config = SwinvaConfig()
    name_split = swinva_name.split('''_''')

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinva_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinva_name) and ("to" not in swinva_name):
        num_classes = 21_841
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-22k-id2label.json'''
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}
    else:
        num_classes = 1_000
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-1k-id2label.json'''
        idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
        idalabel = {int(k): v for k, v in idalabel.items()}
        config.idalabel = idalabel
        config.labelaid = {v: k for k, v in idalabel.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''', '''embeddings.patch_embeddings.projection''')
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''', '''embeddings.norm''')
    if "layers" in name:
        name = '''encoder.''' + name
    if "attn.proj" in name:
        name = name.replace('''attn.proj''', '''attention.output.dense''')
    if "attn" in name:
        name = name.replace('''attn''', '''attention.self''')
    if "norm1" in name:
        name = name.replace('''norm1''', '''layernorm_before''')
    if "norm2" in name:
        name = name.replace('''norm2''', '''layernorm_after''')
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''', '''intermediate.dense''')
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''', '''output.dense''')
    if "q_bias" in name:
        name = name.replace('''q_bias''', '''query.bias''')
    if "k_bias" in name:
        name = name.replace('''k_bias''', '''key.bias''')
    if "v_bias" in name:
        name = name.replace('''v_bias''', '''value.bias''')
    if "cpb_mlp" in name:
        name = name.replace('''cpb_mlp''', '''continuous_position_bias_mlp''')
    if name == "norm.weight":
        name = '''layernorm.weight'''
    if name == "norm.bias":
        name = '''layernorm.bias'''

    if "head" in name:
        name = name.replace('''head''', '''classifier''')
    else:
        name = '''swinv2.''' + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split('''.''')
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                # split the fused qkv weight into query/key/value
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[:dim]
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[
                    dim : dim * 2
                ]
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swinva_checkpoint(swinva_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinva_name, pretrained=True)
    timm_model.eval()

    config = get_swinva_config(swinva_name)
    model = SwinvaForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''

    image_processor = AutoImageProcessor.from_pretrained('''microsoft/{}'''.format(swinva_name.replace('''_''', '''-''')))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors='''pt''')

    timm_outs = timm_model(inputs['''pixel_values'''])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1E-3)

    print(F'''Saving model {swinva_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)

    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinva_name),
        organization='''nandwalritik''',
        commit_message='''Add model''',
    )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swinv2_name",
        default="swinv2_tiny_patch4_window8_256",
        type=str,
        help="Name of the Swinv2 timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
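# Example invocation (script file name and output path are illustrative):
#   python convert_swinv2_timm_to_pytorch.py --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path ./swinv2-tiny-patch4-window8-256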
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    """Count how many argmax predictions match the labels."""
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    Each story/continuation pair is encoded as:
    [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
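# Shape example (hypothetical sizes): with 100 stories and input_len=78, each returned
# dataset is a tuple of tensors shaped (100, 2, 78), (100, 2), (100, 2, 78) and (100,),
# i.e. one row per continuation choice of each story.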
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # These loading functions also add new tokens and embeddings called `special tokens`.
    # The new embeddings will be fine-tuned on the RocStories dataset.
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
main()
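# Example invocation (paths are illustrative):
#   python run_openai_gpt.py --model_name openai-gpt --do_train --do_eval \
#       --train_dataset "$ROC_STORIES_DIR/cloze_test_val__spring2016.csv" \
#       --eval_dataset "$ROC_STORIES_DIR/cloze_test_test__spring2016.csv" \
#       --output_dir ../log --train_batch_size 16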
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None):
    """
    Compute the Schur complement of the block A in the block matrix
    [[A, B], [B.T, C]], i.e. C - B.T @ A^{-1} @ B.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        raise ValueError(
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )

    if shape_b[1] != shape_c[1]:
        raise ValueError(
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b
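# Worked example (values chosen for illustration): for a = [[2, 1], [1, 2]],
# b = [[1], [0]] and c = [[3]], the Schur complement is 3 - b.T @ inv(a) @ b
# = 3 - 2/3 = 7/3, and indeed det([[a, b], [b.T, c]]) = 7 = det(a) * 7/3 = 3 * 7/3.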
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])

        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        # b deliberately has fewer rows than a, so the dimension check must raise
        b = np.array([[0, 3], [3, 0]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    unittest.main()
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    """Wraps an image processor and a tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Prepare text with the tokenizer and images with the image processor."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> np.ndarray:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=False)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users end up on this path, tokenizing every target gets
                # noticeably slow, so warn them to fix their inputs for faster runs.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
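# Hedged usage sketch of this pipeline (model name is illustrative):
#   from transformers import pipeline
#   unmasker = pipeline("fill-mask", model="bert-base-uncased")
#   unmasker("Paris is the [MASK] of France.", targets=["capital"], top_k=1)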
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # No text is detected in this image, so layoutlmv2 should return an empty answer.
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass the words and bounding boxes directly
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def UpperCAmelCase_ ( self :Optional[Any] )-> Tuple:
A__ = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
A__ = INVOICE_URL
A__ = "What is the invoice number?"
A__ = dqa_pipeline(image=lowercase_ , question=lowercase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"score": 0.9_9_4_4, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_0_0_9, "answer": "us-001", "start": 16, "end": 16},
] , )
A__ = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"score": 0.9_9_4_4, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_0_0_9, "answer": "us-001", "start": 16, "end": 16},
] , )
A__ = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
[
{"score": 0.9_9_4_4, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_0_0_9, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def UpperCAmelCase_ ( self :Tuple )-> Optional[Any]:
A__ = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
A__ = INVOICE_URL
A__ = "What is the invoice number?"
A__ = dqa_pipeline(image=lowercase_ , question=lowercase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"score": 0.9_9_7_4, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_9_4_8, "answer": "us-001", "start": 16, "end": 16},
] , )
A__ = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"score": 0.9_9_7_4, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_9_4_8, "answer": "us-001", "start": 16, "end": 16},
] , )
A__ = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
[
{"score": 0.9_9_7_4, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9_9_4_8, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCAmelCase_ ( self :List[Any] )-> Any:
A__ = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=lowercase_ )
A__ = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=lowercase_ , revision="3dc6de3" , )
A__ = INVOICE_URL
A__ = "What is the invoice number?"
A__ = dqa_pipeline(image=lowercase_ , question=lowercase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
] , )
A__ = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
] , )
A__ = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
[
{"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
A__ = list(zip(*apply_tesseract(load_image(lowercase_ ) , lowercase_ , "" ) ) )
# This model should also work if `image` is set to None
A__ = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"score": 0.4_2_5_1, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0_8_1_9, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCAmelCase_ ( self :Optional[int] )-> Union[str, Any]:
A__ = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=lowercase_ )
A__ = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=lowercase_ , revision="3dc6de3" , max_seq_len=50 , )
A__ = INVOICE_URL
A__ = "What is the invoice number?"
A__ = dqa_pipeline(image=lowercase_ , question=lowercase_ , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"score": 0.9_9_9_9, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_9_9_8, "answer": "us-001", "start": 16, "end": 16},
] , )
A__ = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
[
{"score": 0.9_9_9_9, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_9_9_8, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
A__ = list(zip(*apply_tesseract(load_image(lowercase_ ) , lowercase_ , "" ) ) )
# This model should also work if `image` is set to None
A__ = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(lowercase_ , decimals=4 ) , [
{"score": 0.9_9_9_9, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9_9_9_8, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def UpperCAmelCase_ ( self :Optional[int] )-> Tuple:
A__ = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
A__ = INVOICE_URL
A__ = "What is the invoice number?"
A__ = dqa_pipeline(image=lowercase_ , question=lowercase_ , top_k=2 )
self.assertEqual(nested_simplify(lowercase_ , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
    def test_small_model_tf(self):
pass
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}


class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
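# Hedged usage sketch: the nested configs can be composed explicitly or left to defaults.
#   config = OwlViTConfig(text_config={}, vision_config={})
#   assert config.text_config.hidden_size == 512 and config.vision_config.patch_size == 32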
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)

    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"[UNK]",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"[UNK]",
".",
] , )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = {"input_ids": [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
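# With this lazy pattern, `from transformers.models.ernie import ErnieModel` defers
# the heavy torch-backed import until the attribute is actually accessed.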
"""
Project Euler Problem 11: https://projecteuler.net/problem=11

What is the greatest product of four adjacent numbers in the same direction
(up, down, left, right, or diagonally) in the 20x20 grid?
"""
import os


def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal (/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)
if __name__ == "__main__":
print(solution())
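# For the standard 20x20 grid shipped with Project Euler problem 11, solution()
# is expected to return 70600674.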
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
        pass
def A ( self : Any )-> Optional[int]:
# Initialize image_processing
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__UpperCamelCase , __UpperCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase , __UpperCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
__UpperCamelCase = image_processing(A_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Optional[Any] )-> Union[str, Any]:
# Initialize image_processing
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__UpperCamelCase , __UpperCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase = image_processing(A_ , return_tensors="pt" ).pixel_values
__UpperCamelCase , __UpperCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Optional[int] )-> Union[str, Any]:
# Initialize image_processing
__UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
__UpperCamelCase = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
__UpperCamelCase , __UpperCamelCase = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__UpperCamelCase = image_processing(A_ , return_tensors="pt" ).pixel_values
__UpperCamelCase , __UpperCamelCase = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4))
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
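# Illustrative sketch (ours, not part of the original test file): DETR-style
# image processors such as YolosImageProcessor return boxes in normalized
# (center_x, center_y, width, height) format, which is why the expected box
# values asserted above all lie in [0, 1]. The helper below shows the
# conversion from a COCO-style absolute (x, y, w, h) box; the name is ours.
def _coco_xywh_to_normalized_cxcywh(box, image_width, image_height):
    x, y, w, h = box
    return (
        (x + w / 2) / image_width,   # center_x, as a fraction of image width
        (y + h / 2) / image_height,  # center_y, as a fraction of image height
        w / image_width,             # width fraction
        h / image_height,            # height fraction
    )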
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fastaa_timesteps,
    smartaa_timesteps,
    smartaaa_timesteps,
    superaa_timesteps,
    superaaa_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    """Output class for the IF pipelines."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
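# Minimal usage sketch (ours, not part of this __init__): the guarded imports
# above only expose the pipelines when both `transformers` and `torch` are
# installed. A typical entry point then looks like this; the checkpoint id is
# an example and may differ for your setup.
#
#   from diffusers import IFPipeline
#
#   pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0")
#   images = pipe("a photo of an astronaut riding a horse").images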
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    """Drop fairseq bookkeeping keys that have no HF equivalent."""
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def make_linear_from_emb(emb):
    """Build an output projection whose weights are tied to an embedding."""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
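# Quick check (ours, not part of the conversion script): the layer returned
# above shares the embedding's weight tensor, so applying it is the same as a
# matmul against the transposed embedding matrix.
#
#   emb = nn.Embedding(10, 4)
#   lin = make_linear_from_emb(emb)
#   x = torch.randn(2, 4)
#   torch.allclose(lin(x), x @ emb.weight.T)  # -> True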
def rename_fairseq_keys(state_dict, expert_idx=None):
    """Rename fairseq MoE state-dict keys to the NllbMoe naming scheme."""
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # Note: the original `if "fc2" and "experts" not in key` always treated the
        # string literal as truthy; the intended check is membership in the key.
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
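# Toy example (ours): how a fairseq expert key is re-mapped by the function above.
#
#   toy = {"layers.3.moe_layer.experts.0.fc1.weight": torch.zeros(1)}
#   rename_fairseq_keys(toy, expert_idx=7)
#   # -> {"layers.3.ffn.experts.expert_7.fc1.weight": tensor([0.])}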
def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
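# For reference (ours, not part of the script): the index written above follows
# the standard sharded-checkpoint layout consumed by `from_pretrained`. A
# sketch of the JSON, with example shard names:
#
#   {
#     "metadata": {"total_size": 123456789},
#     "weight_map": {
#       "encoder.layers.0.ffn.fc1.weight": "pytorch_model-00001-of-00003.bin",
#       "decoder.layers.0.ffn.experts.expert_0.fc1.weight": "pytorch_model-00002-of-00003.bin"
#     }
#   }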
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )
    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
'''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)
        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32
        if self.block_type == "mid":
            init_dict.pop("out_channels")
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
snake_case_ = logging.get_logger(__name__)
class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"""digital_image_processing/image_data/lena_small.jpg""")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
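# Illustrative note (ours, not part of the test module): a local binary pattern
# value thresholds the 8 neighbours of a pixel against the center intensity and
# reads the resulting bits as an 8-bit number, i.e. roughly:
#
#   lbp_value = sum((1 if neighbour_k >= center else 0) << k for k in range(8))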
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class KarrasVePipeline(DiffusionPipeline):
    unet: UNetaDModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNetaDModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
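# Gloss on steps 4-6 above (ours, following Karras et al., "Elucidating the
# Design Space of Diffusion-Based Generative Models"): the Euler step estimates
#
#   d_i      = (x_hat - D(x_hat; sigma_hat)) / sigma_hat
#   x_{i+1}  = x_hat + (sigma_{i+1} - sigma_hat) * d_i
#
# and, when sigma_{i+1} != 0, the second-order correction re-evaluates the
# derivative at x_{i+1} and averages the two slopes (Heun's method):
#
#   x_{i+1}  = x_hat + (sigma_{i+1} - sigma_hat) * (d_i + d_i') / 2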
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
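# Quick example (ours): given a README whose first line is "---", the function
# splits the YAML front matter from the body.
#
#   _split_yaml_from_readme("---\nlicense: mit\n---\nBody text")
#   # -> ("license: mit", "Body text")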
class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
"""simple docstring"""
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """
    Count set bits by repeatedly clearing the lowest set bit.
    >>> get_set_bits_count_using_brian_kernighans_algorithm(25)
    3
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """
    Count set bits by checking the lowest bit and shifting right.
    >>> get_set_bits_count_using_modulo_operator(37)
    3
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
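# Cross-check sketch (ours, not part of the original module): both counters
# should agree with Python's built-in popcount on any non-negative integer.
#
#   for n in (0, 1, 25, 37, 58):
#       assert get_set_bits_count_using_brian_kernighans_algorithm(n) == bin(n).count("1")
#       assert get_set_bits_count_using_modulo_operator(n) == bin(n).count("1")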
def benchmark() -> None:
    """Benchmark both bit-counting implementations for a few sample inputs."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit(f"z.get_set_bits_count_using_modulo_operator({number})", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            f"z.get_set_bits_count_using_brian_kernighans_algorithm({number})",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING

FlaxAutoModel = auto_class_update(FlaxAutoModel)

class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING

FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")

class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING

FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")

class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING

FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")

class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc="sequence-to-sequence language modeling", checkpoint_for_example="t5-base"
)

class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
)

class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING

FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")

class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING

FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc="token classification"
)

class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING

FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")

class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING

FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
)

class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc="image classification"
)

class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING

FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")

class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING

FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
)
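# Minimal usage sketch (ours, not part of the module): the auto classes resolve
# a checkpoint's config type to the matching Flax architecture, e.g.
#
#   from transformers import FlaxAutoModel
#
#   model = FlaxAutoModel.from_pretrained("bert-base-cased")  # resolves to FlaxBertModel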
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
# fmt: on
class MBart50Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
@property
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def lowercase_ ( self : Tuple ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def lowercase_ ( self : Optional[Any] , lowercase__ : str ):
'''simple docstring'''
a_ : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Any ):
'''simple docstring'''
a_ : str = self.__dict__.copy()
a_ : Dict = None
return state
def __setstate__( self : Optional[Any] , lowercase__ : Dict ):
'''simple docstring'''
a_ : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
a_ : Tuple = {}
a_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase_ ( self : int ):
'''simple docstring'''
a_ : Dict = {self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase_ ( self : Optional[int] , lowercase__ : str ):
'''simple docstring'''
return self.sp_model.encode(lowercase__ , out_type=lowercase__ )
def lowercase_ ( self : Tuple , lowercase__ : str ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
a_ : List[str] = self.sp_model.PieceToId(lowercase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def lowercase_ ( self : Optional[int] , lowercase__ : int ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def lowercase_ ( self : Tuple , lowercase__ : str ):
'''simple docstring'''
a_ : Union[str, Any] = []
a_ : List[str] = """"""
a_ : Dict = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(lowercase__ ) + token
a_ : str = True
a_ : Optional[Any] = []
else:
current_sub_tokens.append(lowercase__ )
a_ : Union[str, Any] = False
out_string += self.sp_model.decode(lowercase__ )
return out_string.strip()
def lowercase_ ( self : Tuple , lowercase__ : str , lowercase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(lowercase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
a_ : Dict = os.path.join(
lowercase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase__ , """wb""" ) as fi:
a_ : Dict = self.sp_model.serialized_model_proto()
fi.write(lowercase__ )
return (out_vocab_file,)
def lowercase_ ( self : List[str] , lowercase__ : List[int] , lowercase__ : Optional[List[int]] = None , lowercase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase__ , token_ids_a=lowercase__ , already_has_special_tokens=lowercase__ )
a_ : Union[str, Any] = [1] * len(self.prefix_tokens )
a_ : List[Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowercase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowercase__ )) + ([0] * len(lowercase__ )) + suffix_ones
def lowercase_ ( self : List[str] , lowercase__ : List[int] , lowercase__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowercase_ ( self : Any , lowercase__ : Optional[int] , lowercase__ : str , lowercase__ : Optional[str] , lowercase__ : Optional[str] , **lowercase__ : Optional[int] ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
a_ : Union[str, Any] = src_lang
a_ : Dict = self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ )
a_ : Tuple = self.convert_tokens_to_ids(lowercase__ )
a_ : Optional[Any] = tgt_lang_id
return inputs
def lowercase_ ( self : Optional[Any] , lowercase__ : List[str] , lowercase__ : str = "en_XX" , lowercase__ : Optional[List[str]] = None , lowercase__ : str = "ro_RO" , **lowercase__ : Union[str, Any] , ):
'''simple docstring'''
a_ : str = src_lang
a_ : Dict = tgt_lang
return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowercase_ ( self : Optional[int] , lowercase__ : str ):
'''simple docstring'''
a_ : Any = self.lang_code_to_id[src_lang]
a_ : Dict = [self.cur_lang_code_id]
a_ : Any = [self.eos_token_id]
def lowercase_ ( self : Any , lowercase__ : str ):
'''simple docstring'''
a_ : Union[str, Any] = self.lang_code_to_id[tgt_lang]
a_ : str = [self.cur_lang_code_id]
a_ : Union[str, Any] = [self.eos_token_id]
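# Minimal usage sketch (ours, not part of the tokenizer module): MBart-50
# checkpoints expect a language code before the source text, which the
# tokenizer handles via `src_lang`/`tgt_lang`.
#
#   from transformers import MBart50Tokenizer
#
#   tok = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")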
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
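# The end-to-end masked-LM check above can also be reproduced with the fill-mask pipeline;
# a minimal sketch using the same public checkpoint as in the test:
if __name__ == "__main__":
    from transformers import pipeline

    fill = pipeline("fill-mask", model="uw-madison/nystromformer-512")
    print(fill("the [MASK] of Belgium is Brussels")[0]["token_str"])  # expected: "capital"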
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        # Start from a white canvas of the destination size (3-channel, uint8).
        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination column index to the nearest source column index."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination row index to the nearest source row index."""
        return int(self.ratio_y * y)
if __name__ == "__main__":
__UpperCAmelCase , __UpperCAmelCase :int = 8_0_0, 6_0_0
__UpperCAmelCase :Dict = imread("image_data/lena.jpg", 1)
__UpperCAmelCase :int = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
destroyAllWindows()
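# Note: the Python-level double loop above is easy to read but slow for large images. The same
# nearest-neighbour mapping can be expressed with vectorised numpy indexing; a minimal sketch
# (the helper name is ours, not part of the original module):
def nearest_neighbour_resize(img: np.ndarray, dst_w: int, dst_h: int) -> np.ndarray:
    # Precompute, for every destination row/column, the nearest source row/column.
    ys = (np.arange(dst_h) * (img.shape[0] / dst_h)).astype(int)
    xs = (np.arange(dst_w) * (img.shape[1] / dst_w)).astype(int)
    return img[ys[:, None], xs[None, :]]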
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None
class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)
    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)
    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)
    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")
    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})
    def test_ensure_valid_input(self):
        # All generated args are valid
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")
    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True
    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)
    # can't train a new tokenizer via the tokenizers lib
    def test_training_new_tokenizer(self):
        pass

    # can't train a new tokenizer via the tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
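# A standalone sketch of the tokenizer exercised above, outside the test mixin (same public
# checkpoint; the slow tokenizer additionally needs `rjieba` for Chinese word segmentation):
if __name__ == "__main__":
    from transformers import RoFormerTokenizerFast

    tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
    print(tok.tokenize("今天天气非常好"))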
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
logger = logging.get_logger(__name__)


class MCTCTFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
    def _extract_mfsc_features(self, one_waveform: np.ndarray) -> np.ndarray:
        """Extract log-mel filterbank (MFSC) features for one waveform vector (unbatched)."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel="log",
        )
        return msfc_features.T
    def _normalize_one(self, x, input_length, padding_value):
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x
    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,
            **kwargs,
        )
        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
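# A minimal usage sketch for the extractor above (synthetic audio; constructor defaults as
# defined in `__init__`):
if __name__ == "__main__":
    extractor = MCTCTFeatureExtractor()
    waveform = np.random.randn(16000).astype(np.float32)  # one second at the default 16 kHz
    batch = extractor(waveform, sampling_rate=16000, return_tensors="np")
    print(batch["input_features"].shape)  # (1, num_frames, 80)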
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )