from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "vinvino02/glpn-kitti": "https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    """Configuration class for GLPN depth-estimation models."""

    model_type = "glpn"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=64,
        max_depth=10,
        head_in_index=-1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
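# Illustrative usage (added; not part of the original file): instantiating the
# config defined above and overriding one field. The override value is an
# arbitrary example.
#
#   config = GLPNConfig(decoder_hidden_size=128)
#   assert config.model_type == "glpn"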
"""simple docstring"""
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __UpperCAmelCase ( _UpperCamelCase , unittest.TestCase ):
__lowerCamelCase : List[Any] = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def UpperCAmelCase ( self : str , a_ : Optional[Any]=0 ) -> Union[str, Any]:
'''simple docstring'''
a__ : Union[str, Any] = np.random.RandomState(a_ )
a__ : List[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
a__ : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=a_ )
a__ : Optional[int] = self.get_dummy_inputs()
a__ : Union[str, Any] = pipe(**a_ ).images
a__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
a__ : List[str] = np.array([0.6_5072, 0.5_8492, 0.4_8219, 0.5_5521, 0.5_3180, 0.5_5939, 0.5_0697, 0.3_9800, 0.4_6455] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
a__ : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
a__ : List[str] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=a_ )
pipe.set_progress_bar_config(disable=a_ )
a__ : Optional[int] = self.get_dummy_inputs()
a__ : List[Any] = pipe(**a_ ).images
a__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
a__ : List[str] = np.array([0.6_5863, 0.5_9425, 0.4_9326, 0.5_6313, 0.5_3875, 0.5_6627, 0.5_1065, 0.3_9777, 0.4_6330] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase ( self : Tuple ) -> Tuple:
'''simple docstring'''
a__ : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
a__ : Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a_ )
a__ : List[Any] = self.get_dummy_inputs()
a__ : Optional[Any] = pipe(**a_ ).images
a__ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
a__ : Dict = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
a__ : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
a__ : Dict = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a_ )
a__ : Optional[int] = self.get_dummy_inputs()
a__ : int = pipe(**a_ ).images
a__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
a__ : str = np.array([0.5_3755, 0.6_0786, 0.4_7402, 0.4_9488, 0.5_1869, 0.4_9819, 0.4_7985, 0.3_8957, 0.4_4279] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
'''simple docstring'''
a__ : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
a__ : Dict = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a_ )
a__ : Any = self.get_dummy_inputs()
a__ : List[str] = pipe(**a_ ).images
a__ : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
a__ : Any = np.array([0.5_3817, 0.6_0812, 0.4_7384, 0.4_9530, 0.5_1894, 0.4_9814, 0.4_7984, 0.3_8958, 0.4_4271] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase ( self : Tuple ) -> Any:
'''simple docstring'''
a__ : str = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
a__ : str = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=a_ )
a__ : Tuple = self.get_dummy_inputs()
a__ : List[str] = pipe(**a_ ).images
a__ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
a__ : Any = np.array([0.5_3895, 0.6_0808, 0.4_7933, 0.4_9608, 0.5_1886, 0.4_9950, 0.4_8053, 0.3_8957, 0.4_4200] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
a__ : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=a_ )
a__ : Any = self.get_dummy_inputs()
a__ : Any = 3 * [inputs["prompt"]]
# forward
a__ : Union[str, Any] = pipe(**a_ )
a__ : int = output.images[0, -3:, -3:, -1]
a__ : Union[str, Any] = self.get_dummy_inputs()
a__ : List[Any] = 3 * [inputs.pop("prompt" )]
a__ : Optional[Any] = pipe.tokenizer(
a_ , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=a_ , return_tensors="np" , )
a__ : List[str] = text_inputs["input_ids"]
a__ : int = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
a__ : List[Any] = prompt_embeds
# forward
a__ : List[Any] = pipe(**a_ )
a__ : List[str] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def UpperCAmelCase ( self : Dict ) -> Optional[int]:
'''simple docstring'''
a__ : Tuple = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=a_ )
a__ : Tuple = self.get_dummy_inputs()
a__ : Dict = 3 * ["this is a negative prompt"]
a__ : Optional[Any] = negative_prompt
a__ : Any = 3 * [inputs["prompt"]]
# forward
a__ : str = pipe(**a_ )
a__ : List[str] = output.images[0, -3:, -3:, -1]
a__ : Union[str, Any] = self.get_dummy_inputs()
a__ : Union[str, Any] = 3 * [inputs.pop("prompt" )]
a__ : List[Any] = []
for p in [prompt, negative_prompt]:
a__ : int = pipe.tokenizer(
a_ , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=a_ , return_tensors="np" , )
a__ : Any = text_inputs["input_ids"]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
a__ , a__ : Union[str, Any] = embeds
# forward
a__ : Dict = pipe(**a_ )
a__ : Any = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        # using the PNDM scheduler by default
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_ddim(self):
        ddim_scheduler = DDIMScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=ddim_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        sd_pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "open neural network exchange"
        generator = np.random.RandomState(0)
        output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 5:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875]
                )

                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"
        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
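# Illustrative usage sketch (added; not part of the original tests): precomputing
# prompt embeddings and passing them to the pipeline, mirroring what
# test_stable_diffusion_prompt_embeds verifies against the tiny test checkpoint.
#
#   pipe = OnnxStableDiffusionPipeline.from_pretrained(
#       "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline",
#       provider="CPUExecutionProvider",
#   )
#   text_inputs = pipe.tokenizer(
#       "A painting of a squirrel eating a burger",
#       padding="max_length",
#       max_length=pipe.tokenizer.model_max_length,
#       truncation=True,
#       return_tensors="np",
#   )
#   prompt_embeds = pipe.text_encoder(input_ids=text_inputs["input_ids"].astype(np.int32))[0]
#   image = pipe(prompt_embeds=prompt_embeds, num_inference_steps=2, output_type="numpy").images[0]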
"""Bilateral filtering of a grayscale image."""

import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        # force the kernel size to be odd
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
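# Example invocation (added for illustration; the script filename is assumed,
# the arguments mirror parse_args above):
#   python bilateral_filter.py ../image_data/lena.jpg 1.0 1.0 5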
import copy
import re


class TrialShortNamer:
    """Maps hyperparameter dicts to short, reversible run names (and back)."""

    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info, param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info, word) for word in words]

        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]

        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname

        return param_name

    @staticmethod
    def add_new_param_name(info, param_name):
        short_name = TrialShortNamer.shortname_for_key(info, param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return

        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info, k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls, params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(f"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v, bool):
                v = 1 if v else 0
            sep = "" if isinstance(v, (int, float)) else "-"
            e = f"{key}{sep}{v}"
            name.append(e)
        return "_".join(name)

    @classmethod
    def parse_repr(cls, repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")

        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]", "", value)
                p_v = float(re.sub("[^0-9.]", "", value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v

        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]

        return parameters
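# Illustrative usage (added; hypothetical subclass, not in the original file):
# only values that differ from DEFAULTS appear in the generated name, and
# parse_repr round-trips the name back to a full parameter dict.
#
#   class RunNamer(TrialShortNamer):
#       PREFIX = "run"
#       DEFAULTS = {"learning_rate": 0.1, "warmup": 5}
#
#   name = RunNamer.shortname({"learning_rate": 0.2, "warmup": 5})  # "run_lr0.2"
#   params = RunNamer.parse_repr(name)  # {"learning_rate": 0.2, "warmup": 5}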
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
__UpperCamelCase : Optional[int] = [
# tf -> hf
('''/''', '''.'''),
('''layer_''', '''layers.'''),
('''kernel''', '''weight'''),
('''beta''', '''bias'''),
('''gamma''', '''weight'''),
('''pegasus''', '''model'''),
]
__UpperCamelCase : str = [
('''.output.dense''', '''.fc2'''),
('''intermediate.LayerNorm''', '''final_layer_norm'''),
('''intermediate.dense''', '''fc1'''),
]
__UpperCamelCase : str = (
INIT_COMMON
+ [
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.out_proj'''),
('''attention.self''', '''self_attn'''),
('''attention.encdec.LayerNorm''', '''encoder_attn_layer_norm'''),
('''attention.encdec_output.dense''', '''encoder_attn.out_proj'''),
('''attention.encdec''', '''encoder_attn'''),
('''key''', '''k_proj'''),
('''value''', '''v_proj'''),
('''query''', '''q_proj'''),
('''decoder.LayerNorm''', '''decoder.layernorm_embedding'''),
]
+ END_COMMON
)
__UpperCamelCase : List[str] = (
INIT_COMMON
+ [
('''embeddings.word_embeddings''', '''shared.weight'''),
('''embeddings.position_embeddings''', '''embed_positions.weight'''),
('''attention.self.LayerNorm''', '''self_attn_layer_norm'''),
('''attention.output.dense''', '''self_attn.output'''),
('''attention.self''', '''self_attn.self'''),
('''encoder.LayerNorm''', '''encoder.layernorm_embedding'''),
]
+ END_COMMON
)
__UpperCamelCase : Union[str, Any] = [
'''encdec/key/bias''',
'''encdec/query/bias''',
'''encdec/value/bias''',
'''self/key/bias''',
'''self/query/bias''',
'''self/value/bias''',
'''encdec_output/dense/bias''',
'''attention/output/dense/bias''',
]
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] ):
for tf_name, hf_name in patterns:
lowerCAmelCase = k.replace(_UpperCAmelCase , _UpperCAmelCase )
return k
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : dict , _UpperCAmelCase : dict ):
lowerCAmelCase = BigBirdPegasusConfig(**_UpperCAmelCase )
lowerCAmelCase = BigBirdPegasusForConditionalGeneration(_UpperCAmelCase )
lowerCAmelCase = torch_model.state_dict()
lowerCAmelCase = {}
# separating decoder weights
lowerCAmelCase = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )}
lowerCAmelCase = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )}
for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ):
lowerCAmelCase = [k.endswith(_UpperCAmelCase ) for ending in KEYS_TO_IGNORE]
if any(_UpperCAmelCase ):
continue
lowerCAmelCase = DECODER_PATTERNS
lowerCAmelCase = rename_state_dict_key(_UpperCAmelCase , _UpperCAmelCase )
if new_k not in state_dict:
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
lowerCAmelCase = v.T
lowerCAmelCase = torch.from_numpy(_UpperCAmelCase )
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ):
lowerCAmelCase = [k.endswith(_UpperCAmelCase ) for ending in KEYS_TO_IGNORE]
if any(_UpperCAmelCase ):
continue
lowerCAmelCase = REMAINING_PATTERNS
lowerCAmelCase = rename_state_dict_key(_UpperCAmelCase , _UpperCAmelCase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})' )
if any(True if i in k else False for i in ['dense', 'query', 'key', 'value'] ):
lowerCAmelCase = v.T
lowerCAmelCase = torch.from_numpy(_UpperCAmelCase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, F'{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}'
lowerCAmelCase = mapping['model.embed_positions.weight']
lowerCAmelCase = mapping.pop('model.embed_positions.weight' )
lowerCAmelCase ,lowerCAmelCase = torch_model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
lowerCAmelCase = [
k
for k in missing
if k
not in [
'final_logits_bias',
'model.encoder.embed_tokens.weight',
'model.decoder.embed_tokens.weight',
'lm_head.weight',
]
]
assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
assert extra == [], F'no matches found for the following tf keys {extra}'
return torch_model
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[Any] ):
lowerCAmelCase = tf.train.list_variables(_UpperCAmelCase )
lowerCAmelCase = {}
lowerCAmelCase = ['global_step']
for name, shape in tqdm(_UpperCAmelCase , desc='converting tf checkpoint to dict' ):
lowerCAmelCase = any(pat in name for pat in ignore_name )
if skip_key:
continue
lowerCAmelCase = tf.train.load_variable(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase = array
return tf_weights
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : dict ):
lowerCAmelCase = get_tf_weights_as_numpy(_UpperCAmelCase )
lowerCAmelCase = convert_bigbird_pegasus(_UpperCAmelCase , _UpperCAmelCase )
torch_model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
__UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''')
parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''')
__UpperCamelCase : Optional[int] = parser.parse_args()
__UpperCamelCase : Optional[Any] = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
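# Example invocation (added for illustration; the script filename and paths are
# placeholders, the flags match the argparse definitions above):
#   python convert_bigbird_pegasus_tf_to_pytorch.py \
#       --tf_ckpt_path /path/to/tf_checkpoint \
#       --save_dir ./bigbird_pegasus_pytorch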
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
for attribute in key.split('''.''' ):
__lowercase : str = getattr(__UpperCamelCase , __UpperCamelCase )
if weight_type is not None:
__lowercase : int = getattr(__UpperCamelCase , __UpperCamelCase ).shape
else:
__lowercase : int = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__lowercase : List[str] = value
elif weight_type == "weight_g":
__lowercase : Optional[Any] = value
elif weight_type == "weight_v":
__lowercase : Tuple = value
elif weight_type == "bias":
__lowercase : Dict = value
else:
__lowercase : Union[str, Any] = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
__lowercase : Tuple = []
__lowercase : Union[str, Any] = fairseq_model.state_dict()
__lowercase : Optional[Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
__lowercase : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , hf_model.config.feat_extract_norm == '''group''' , )
__lowercase : List[str] = True
else:
for key, mapped_key in MAPPING.items():
__lowercase : List[str] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned):
__lowercase : int = True
if "*" in mapped_key:
__lowercase : Union[str, Any] = name.split(__UpperCamelCase )[0].split('''.''' )[-2]
__lowercase : Tuple = mapped_key.replace('''*''' , __UpperCamelCase )
if "weight_g" in name:
__lowercase : Tuple = '''weight_g'''
elif "weight_v" in name:
__lowercase : Optional[int] = '''weight_v'''
elif "weight" in name:
__lowercase : str = '''weight'''
elif "bias" in name:
__lowercase : Optional[int] = '''bias'''
else:
__lowercase : List[str] = None
set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
continue
if not is_used:
unused_weights.append(__UpperCamelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
__lowercase : List[Any] = full_name.split('''conv_layers.''' )[-1]
__lowercase : str = name.split('''.''' )
__lowercase : Dict = int(items[0] )
__lowercase : Any = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__lowercase : List[str] = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__lowercase : Tuple = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__lowercase : Union[str, Any] = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__lowercase : Tuple = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__UpperCamelCase )
@torch.no_grad()
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=True ):
if config_path is not None:
__lowercase : Dict = HubertConfig.from_pretrained(__UpperCamelCase )
else:
__lowercase : str = HubertConfig()
if is_finetuned:
if dict_path:
__lowercase : Tuple = Dictionary.load(__UpperCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__lowercase : int = target_dict.pad_index
__lowercase : Union[str, Any] = target_dict.bos_index
__lowercase : int = target_dict.eos_index
__lowercase : int = len(target_dict.symbols )
__lowercase : Dict = os.path.join(__UpperCamelCase , '''vocab.json''' )
if not os.path.isdir(__UpperCamelCase ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__UpperCamelCase ) )
return
os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase )
with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , __UpperCamelCase )
__lowercase : str = WavaVecaCTCTokenizer(
__UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__UpperCamelCase , )
__lowercase : str = True if config.feat_extract_norm == '''layer''' else False
__lowercase : Any = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__UpperCamelCase , return_attention_mask=__UpperCamelCase , )
__lowercase : Union[str, Any] = WavaVecaProcessor(feature_extractor=__UpperCamelCase , tokenizer=__UpperCamelCase )
processor.save_pretrained(__UpperCamelCase )
__lowercase : Optional[Any] = HubertForCTC(__UpperCamelCase )
else:
__lowercase : Union[str, Any] = HubertModel(__UpperCamelCase )
if is_finetuned:
__lowercase ,__lowercase ,__lowercase : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
__lowercase ,__lowercase ,__lowercase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
__lowercase : Union[str, Any] = model[0].eval()
recursively_load_weights(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
hf_wavavec.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
a_ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
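# Example invocation (added for illustration; the script filename and paths are
# placeholders, the flags match the argparse definitions above):
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path ./hubert.pt \
#       --pytorch_dump_folder_path ./hubert-hf \
#       --dict_path ./dict.ltr.txt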
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : str = {
'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class lowercase ( __UpperCAmelCase):
__lowerCAmelCase : Tuple = """sew"""
def __init__( self : Optional[int] , _lowerCamelCase : List[str]=32 , _lowerCamelCase : Dict=7_68 , _lowerCamelCase : List[str]=12 , _lowerCamelCase : int=12 , _lowerCamelCase : str=30_72 , _lowerCamelCase : Union[str, Any]=2 , _lowerCamelCase : List[Any]="gelu" , _lowerCamelCase : Optional[Any]=0.1 , _lowerCamelCase : Tuple=0.1 , _lowerCamelCase : List[Any]=0.1 , _lowerCamelCase : Tuple=0.0 , _lowerCamelCase : Dict=0.1 , _lowerCamelCase : Optional[Any]=0.1 , _lowerCamelCase : Any=0.02 , _lowerCamelCase : Dict=1E-5 , _lowerCamelCase : Tuple="group" , _lowerCamelCase : List[Any]="gelu" , _lowerCamelCase : Optional[Any]=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , _lowerCamelCase : List[str]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _lowerCamelCase : int=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _lowerCamelCase : Optional[int]=False , _lowerCamelCase : Optional[int]=1_28 , _lowerCamelCase : Optional[int]=16 , _lowerCamelCase : Dict=True , _lowerCamelCase : List[str]=0.05 , _lowerCamelCase : Tuple=10 , _lowerCamelCase : List[str]=2 , _lowerCamelCase : Union[str, Any]=0.0 , _lowerCamelCase : List[Any]=10 , _lowerCamelCase : Optional[Any]=0 , _lowerCamelCase : Union[str, Any]="mean" , _lowerCamelCase : Dict=False , _lowerCamelCase : Dict=False , _lowerCamelCase : Dict=2_56 , _lowerCamelCase : Union[str, Any]=0 , _lowerCamelCase : Optional[Any]=1 , _lowerCamelCase : Dict=2 , **_lowerCamelCase : Any , ):
"""simple docstring"""
super().__init__(**_lowerCamelCase , pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase )
A_ : Tuple = hidden_size
A_ : int = feat_extract_norm
A_ : List[Any] = feat_extract_activation
A_ : Union[str, Any] = list(_lowerCamelCase )
A_ : List[Any] = list(_lowerCamelCase )
A_ : Optional[Any] = list(_lowerCamelCase )
A_ : int = conv_bias
A_ : Dict = num_conv_pos_embeddings
A_ : Union[str, Any] = num_conv_pos_embedding_groups
A_ : Tuple = len(self.conv_dim )
A_ : int = num_hidden_layers
A_ : Tuple = intermediate_size
A_ : Any = squeeze_factor
A_ : int = hidden_act
A_ : int = num_attention_heads
A_ : Tuple = hidden_dropout
A_ : Union[str, Any] = attention_dropout
A_ : Tuple = activation_dropout
A_ : Tuple = feat_proj_dropout
A_ : Union[str, Any] = final_dropout
A_ : Optional[Any] = layerdrop
A_ : Optional[Any] = layer_norm_eps
A_ : str = initializer_range
A_ : int = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A_ : Dict = apply_spec_augment
A_ : Union[str, Any] = mask_time_prob
A_ : Dict = mask_time_length
A_ : Union[str, Any] = mask_time_min_masks
A_ : int = mask_feature_prob
A_ : List[Any] = mask_feature_length
A_ : List[Any] = mask_feature_min_masks
# ctc loss
A_ : int = ctc_loss_reduction
A_ : str = ctc_zero_infinity
# sequence classification
A_ : Dict = use_weighted_layer_sum
A_ : Tuple = classifier_proj_size
@property
def a_ ( self : Tuple ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
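# Worked example (added for illustration): with the default conv_stride of
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the property above multiplies out to
# 5 * 2**6 = 320, i.e. one encoder frame per 320 input samples.
#
#   assert SEWConfig().inputs_to_logits_ratio == 320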
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def lowercase_ ( _UpperCAmelCase = "https://www.worldometers.info/coronavirus" ):
"""simple docstring"""
A_ : str = BeautifulSoup(requests.get(_UpperCAmelCase ).text , '''html.parser''' )
A_ : Union[str, Any] = soup.findAll('''h1''' )
A_ : Union[str, Any] = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} )
keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} )
values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} )
return {key.text.strip(): value.text.strip() for key, value in zip(_UpperCAmelCase , _UpperCAmelCase )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(f'{key}\n{value}\n')
"""Integration tests for datasets mirrored on the Hugging Face GCP bucket."""

import os
from tempfile import TemporaryDirectory
from unittest import TestCase

import pytest
from absl.testing import parameterized

from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path


DATASETS_ON_HF_GCP = [
    {"dataset": "wikipedia", "config_name": "20220301.de"},
    {"dataset": "wikipedia", "config_name": "20220301.en"},
    {"dataset": "wikipedia", "config_name": "20220301.fr"},
    {"dataset": "wikipedia", "config_name": "20220301.frr"},
    {"dataset": "wikipedia", "config_name": "20220301.it"},
    {"dataset": "wikipedia", "config_name": "20220301.simple"},
    {"dataset": "snli", "config_name": "plain_text"},
    {"dataset": "eli5", "config_name": "LFQA_reddit"},
    {"dataset": "wiki40b", "config_name": "en"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
    {"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
    {"dataset": "natural_questions", "config_name": "default"},
]


def list_datasets_on_hf_gcp_parameters(with_config=True):
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]


@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir,
                config_name=config_name,
                hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ]
            )
            dataset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(dataset_info_path))


@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds


@pytest.mark.integration
def test_as_streaming_dataset(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path,
        config_name="20220301.frr",
        hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
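# Illustrative note (added; the version segment is a placeholder): the
# dataset-info URL assembled in test_dataset_info_available has roughly the shape
#   {HF_GCP_BASE_URL}/wikipedia/20220301.de/{version}/dataset_info.json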
"""MRA model configuration."""

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE_ )
class _a ( SCREAMING_SNAKE_CASE_ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
a_ : str = field(default='question-answering-extractive' , metadata={'include_in_asdict_even_if_is_default': True} )
a_ : ClassVar[Features] = Features({'question': Value('string' ), 'context': Value('string' )} )
a_ : ClassVar[Features] = Features(
{
'answers': Sequence(
{
'text': Value('string' ),
'answer_start': Value('int32' ),
} )
} )
a_ : str = "question"
a_ : str = "context"
a_ : str = "answers"
@property
def _UpperCamelCase ( self : int ):
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
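# Illustrative record (added; values invented for the example) matching the
# input/label schemas above, in SQuAD-style extractive-QA form:
#
#   {
#       "question": "Where is the Eiffel Tower?",
#       "context": "The Eiffel Tower is in Paris.",
#       "answers": {"text": ["Paris"], "answer_start": [23]},
#   }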
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_snake_case = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
_snake_case = {
"facebook/nllb-large-en-ro": 1024,
"facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
_snake_case = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        src_lang=None,
        tgt_lang=None,
        additional_special_tokens=None,
        legacy_behaviour=False,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            legacy_behaviour=legacy_behaviour,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})

        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "eng_Latn",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "fra_Latn",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
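# Illustrative usage (added; not part of the original file): translating with
# this fast tokenizer and a matching NLLB seq2seq model. The checkpoint name is
# the one referenced in the constants above.
#
#   from transformers import AutoModelForSeq2SeqLM
#
#   tokenizer = NllbTokenizerFast.from_pretrained(
#       "facebook/nllb-200-distilled-600M", src_lang="eng_Latn"
#   )
#   model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
#   inputs = tokenizer("Hello world", return_tensors="pt")
#   out = model.generate(
#       **inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn")
#   )
#   print(tokenizer.batch_decode(out, skip_special_tokens=True))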
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__A = {
"""configuration_vision_encoder_decoder""": ["""VisionEncoderDecoderConfig""", """VisionEncoderDecoderOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
__A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
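# Note: at import time sys.modules[__name__] is replaced with the _LazyModule
# proxy built above, so the framework-specific submodules are only imported on
# first attribute access.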
| 93
|
"""simple docstring"""
def base16_encode(data: bytes) -> str:
    """simple docstring"""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])
def base16_decode(data: str) -> bytes:
    """simple docstring"""
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits.")
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.")
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
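    # Illustrative round-trip (an added example, not part of the original
    # module): encode a short byte string with the helpers above, then decode
    # it back.
    payload = b"hello"
    encoded = base16_encode(payload)  # "68656C6C6F"
    assert base16_decode(encoded) == payload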
| 104
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
UpperCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase__ = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class lowerCAmelCase__ ( A_ ):
__a = 42
class lowerCAmelCase__ ( A_ ):
def __init__( self : List[Any] , _lowerCamelCase : PriorTransformer , _lowerCamelCase : CLIPVisionModel , _lowerCamelCase : CLIPImageProcessor , _lowerCamelCase : HeunDiscreteScheduler , _lowerCamelCase : ShapERenderer , ):
super().__init__()
self.register_modules(
prior=_lowerCamelCase , image_encoder=_lowerCamelCase , image_processor=_lowerCamelCase , scheduler=_lowerCamelCase , renderer=_lowerCamelCase , )
def lowercase ( self : Optional[int] , _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any , _lowerCamelCase : Dict ):
if latents is None:
_snake_case = randn_tensor(_lowerCamelCase , generator=_lowerCamelCase , device=_lowerCamelCase , dtype=_lowerCamelCase )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
_snake_case = latents.to(_lowerCamelCase )
_snake_case = latents * scheduler.init_noise_sigma
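        # scaling by init_noise_sigma gives the latents the initial noise
        # magnitude the scheduler expects at the first timestep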
return latents
def lowercase ( self : Any , _lowerCamelCase : Optional[int]=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
_snake_case = torch.device(f'''cuda:{gpu_id}''' )
_snake_case = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowerCamelCase , _lowerCamelCase )
@property
def lowercase ( self : str ):
if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_lowerCamelCase , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def lowercase ( self : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , ):
if isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(image[0] , torch.Tensor ):
_snake_case = torch.cat(_lowerCamelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_lowerCamelCase , axis=0 )
if not isinstance(_lowerCamelCase , torch.Tensor ):
_snake_case = self.image_processor(_lowerCamelCase , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 )
_snake_case = image.to(dtype=self.image_encoder.dtype , device=_lowerCamelCase )
_snake_case = self.image_encoder(_lowerCamelCase )['''last_hidden_state''']
_snake_case = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
_snake_case = image_embeds.repeat_interleave(_lowerCamelCase , dim=0 )
if do_classifier_free_guidance:
_snake_case = torch.zeros_like(_lowerCamelCase )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and image embeddings into a
            # single batch to avoid doing two forward passes
_snake_case = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_lowerCamelCase )
def __call__( self : Dict , _lowerCamelCase : Union[PIL.Image.Image, List[PIL.Image.Image]] , _lowerCamelCase : int = 1 , _lowerCamelCase : int = 25 , _lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowerCamelCase : Optional[torch.FloatTensor] = None , _lowerCamelCase : float = 4.0 , _lowerCamelCase : int = 64 , _lowerCamelCase : Optional[str] = "pil" , _lowerCamelCase : bool = True , ):
if isinstance(_lowerCamelCase , PIL.Image.Image ):
_snake_case = 1
elif isinstance(_lowerCamelCase , torch.Tensor ):
_snake_case = image.shape[0]
elif isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
_snake_case = len(_lowerCamelCase )
else:
raise ValueError(
f'''`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_lowerCamelCase )}''' )
_snake_case = self._execution_device
_snake_case = batch_size * num_images_per_prompt
_snake_case = guidance_scale > 1.0
_snake_case = self._encode_image(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# prior
self.scheduler.set_timesteps(_lowerCamelCase , device=_lowerCamelCase )
_snake_case = self.scheduler.timesteps
_snake_case = self.prior.config.num_embeddings
_snake_case = self.prior.config.embedding_dim
_snake_case = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
_snake_case = latents.reshape(latents.shape[0] , _lowerCamelCase , _lowerCamelCase )
for i, t in enumerate(self.progress_bar(_lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
_snake_case = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_snake_case = self.scheduler.scale_model_input(_lowerCamelCase , _lowerCamelCase )
_snake_case = self.prior(
_lowerCamelCase , timestep=_lowerCamelCase , proj_embedding=_lowerCamelCase , ).predicted_image_embedding
# remove the variance
_snake_case , _snake_case = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
            if do_classifier_free_guidance:
_snake_case , _snake_case = noise_pred.chunk(2 )
_snake_case = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
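                # guided prediction = uncond + scale * (cond - uncond); a
                # guidance scale of 1.0 reduces to the conditional prediction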
_snake_case = self.scheduler.step(
_lowerCamelCase , timestep=_lowerCamelCase , sample=_lowerCamelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_lowerCamelCase )
_snake_case = []
for i, latent in enumerate(_lowerCamelCase ):
_snake_case = self.renderer.decode(
latent[None, :] , _lowerCamelCase , size=_lowerCamelCase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(_lowerCamelCase )
_snake_case = torch.stack(_lowerCamelCase )
if output_type not in ["np", "pil"]:
raise ValueError(f'''Only the output types `pil` and `np` are supported not output_type={output_type}''' )
_snake_case = images.cpu().numpy()
if output_type == "pil":
_snake_case = [self.numpy_to_pil(_lowerCamelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_lowerCamelCase )
| 430
|
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    # A local function to see if a dot lands in the circle.
    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1
    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations) )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The known value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")
def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)
def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    def identity_function(x: float) -> float:
        return x
    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value )
    expected_value = (max_value * max_value - min_value * min_value) / 2
    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")
def pi_estimator_using_area_under_curve(iterations: int) -> None:
    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)
    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0 )
    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
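    # Illustrative driver (an added example): exercise each estimator with a
    # modest sample count; outputs vary between runs because sampling is random.
    pi_estimator(100_000)
    area_under_line_estimator_check(100_000)
    pi_estimator_using_area_under_curve(100_000)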
| 430
| 1
|
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(f"""{solution() = }""")
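    # Added sanity checks (illustrative): 29 is the largest prime factor of
    # 13195, the worked example from the Project Euler problem statement.
    assert solution(13195) == 29
    assert solution(17) == 17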
| 366
|
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}
class __UpperCAmelCase ( lowerCAmelCase ):
'''simple docstring'''
_UpperCamelCase = """mvp"""
_UpperCamelCase = ["""past_key_values"""]
_UpperCamelCase = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : List[str] , _lowercase : int=50_267 , _lowercase : str=1_024 , _lowercase : int=12 , _lowercase : int=4_096 , _lowercase : Tuple=16 , _lowercase : Optional[Any]=12 , _lowercase : Union[str, Any]=4_096 , _lowercase : List[Any]=16 , _lowercase : str=0.0 , _lowercase : List[str]=0.0 , _lowercase : Dict="gelu" , _lowercase : Union[str, Any]=1_024 , _lowercase : str=0.1 , _lowercase : List[Any]=0.0 , _lowercase : Any=0.0 , _lowercase : List[str]=0.02 , _lowercase : List[Any]=0.0 , _lowercase : List[str]=False , _lowercase : Tuple=True , _lowercase : Any=1 , _lowercase : Optional[Any]=0 , _lowercase : Dict=2 , _lowercase : int=True , _lowercase : Any=2 , _lowercase : Tuple=2 , _lowercase : Optional[Any]=False , _lowercase : List[str]=100 , _lowercase : List[str]=800 , **_lowercase : Dict , ) -> Dict:
A_ = vocab_size
A_ = max_position_embeddings
A_ = d_model
A_ = encoder_ffn_dim
A_ = encoder_layers
A_ = encoder_attention_heads
A_ = decoder_ffn_dim
A_ = decoder_layers
A_ = decoder_attention_heads
A_ = dropout
A_ = attention_dropout
A_ = activation_dropout
A_ = activation_function
A_ = init_std
A_ = encoder_layerdrop
A_ = decoder_layerdrop
A_ = classifier_dropout
A_ = use_cache
A_ = encoder_layers
A_ = scale_embedding # scale factor will be sqrt(d_model) if True
A_ = use_prompt
A_ = prompt_length
A_ = prompt_mid_dim
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , forced_eos_token_id=_lowercase , **_lowercase , )
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , _lowercase):
A_ = self.bos_token_id
warnings.warn(
F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
'The config can simply be saved and uploaded again to be fixed.')
| 366
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCAmelCase = {
"""configuration_efficientformer""": [
"""EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EfficientFormerConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = ["""EfficientFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EfficientFormerForImageClassification""",
"""EfficientFormerForImageClassificationWithTeacher""",
"""EfficientFormerModel""",
"""EfficientFormerPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase = [
"""TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFEfficientFormerForImageClassification""",
"""TFEfficientFormerForImageClassificationWithTeacher""",
"""TFEfficientFormerModel""",
"""TFEfficientFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 700
|
"""simple docstring"""
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__A : List[str] = VQModel
__A : Any = "sample"
@property
def __snake_case ( self : int , snake_case__ : int=(3_2, 3_2) ):
'''simple docstring'''
lowercase :Optional[int] = 4
lowercase :Tuple = 3
lowercase :List[str] = floats_tensor((batch_size, num_channels) + sizes ).to(snake_case__ )
return {"sample": image}
@property
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
return (3, 3_2, 3_2)
@property
def __snake_case ( self : List[str] ):
'''simple docstring'''
return (3, 3_2, 3_2)
def __snake_case ( self : Dict ):
'''simple docstring'''
lowercase :Optional[int] = {
'''block_out_channels''': [3_2, 6_4],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 3,
}
lowercase :Dict = self.dummy_input
return init_dict, inputs_dict
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
pass
def __snake_case ( self : Union[str, Any] ):
'''simple docstring'''
pass
def __snake_case ( self : Dict ):
'''simple docstring'''
lowercase , lowercase :Optional[int] = VQModel.from_pretrained('''fusing/vqgan-dummy''' , output_loading_info=snake_case__ )
self.assertIsNotNone(snake_case__ )
self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )
model.to(snake_case__ )
lowercase :List[str] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def __snake_case ( self : Optional[int] ):
'''simple docstring'''
lowercase :Optional[int] = VQModel.from_pretrained('''fusing/vqgan-dummy''' )
model.to(snake_case__ ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
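        # seeding the CPU RNG (and the CUDA RNG when available) keeps the
        # random input below reproducible across runs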
lowercase :List[Any] = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
lowercase :int = image.to(snake_case__ )
with torch.no_grad():
lowercase :str = model(snake_case__ ).sample
lowercase :List[Any] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowercase :Tuple = torch.tensor([-0.01_53, -0.40_44, -0.18_80, -0.51_61, -0.24_18, -0.40_72, -0.16_12, -0.06_33, -0.01_43] )
# fmt: on
self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) )
| 475
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bs4 import BeautifulSoup
url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="
def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """simple docstring"""
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f'Job {i:>2} is {job[0]} at {job[1]}')
| 34
|
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class __SCREAMING_SNAKE_CASE( a_ ):
_UpperCAmelCase = "char"
_UpperCAmelCase = "bpe"
_UpperCAmelCase = "wp"
__UpperCamelCase : Optional[Any] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class __SCREAMING_SNAKE_CASE( a_ ):
_UpperCAmelCase = ["image_processor", "char_tokenizer"]
_UpperCAmelCase = "ViTImageProcessor"
_UpperCAmelCase = "MgpstrTokenizer"
def __init__( self: Optional[int] , UpperCamelCase: Dict=None , UpperCamelCase: Any=None , **UpperCamelCase: Any ) -> str:
snake_case__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , UpperCamelCase , )
snake_case__ = kwargs.pop('feature_extractor' )
snake_case__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
snake_case__ = tokenizer
snake_case__ = AutoTokenizer.from_pretrained('gpt2' )
snake_case__ = AutoTokenizer.from_pretrained('bert-base-uncased' )
super().__init__(UpperCamelCase , UpperCamelCase )
def __call__( self: str , UpperCamelCase: List[str]=None , UpperCamelCase: Any=None , UpperCamelCase: Optional[Any]=None , **UpperCamelCase: Optional[int] ) -> List[str]:
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
snake_case__ = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None:
snake_case__ = self.char_tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
snake_case__ = encodings['input_ids']
return inputs
def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: List[str] ) -> int:
snake_case__ , snake_case__ , snake_case__ = sequences
snake_case__ = char_preds.size(0 )
snake_case__ , snake_case__ = self._decode_helper(UpperCamelCase , 'char' )
snake_case__ , snake_case__ = self._decode_helper(UpperCamelCase , 'bpe' )
snake_case__ , snake_case__ = self._decode_helper(UpperCamelCase , 'wp' )
snake_case__ = []
snake_case__ = []
for i in range(UpperCamelCase ):
snake_case__ = [char_scores[i], bpe_scores[i], wp_scores[i]]
snake_case__ = [char_strs[i], bpe_strs[i], wp_strs[i]]
snake_case__ = scores.index(max(UpperCamelCase ) )
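            # keep whichever decoding head (char / bpe / wordpiece) produced
            # the highest sequence confidence for this sample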
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
snake_case__ = {}
snake_case__ = final_strs
snake_case__ = final_scores
snake_case__ = char_strs
snake_case__ = bpe_strs
snake_case__ = wp_strs
return out
def lowerCAmelCase_ ( self: str , UpperCamelCase: str , UpperCamelCase: Tuple ) -> Optional[int]:
if format == DecodeType.CHARACTER:
snake_case__ = self.char_decode
snake_case__ = 1
snake_case__ = '[s]'
elif format == DecodeType.BPE:
snake_case__ = self.bpe_decode
snake_case__ = 2
snake_case__ = '#'
elif format == DecodeType.WORDPIECE:
snake_case__ = self.wp_decode
snake_case__ = 1_02
snake_case__ = '[SEP]'
else:
raise ValueError(F'''Format {format} is not supported.''' )
snake_case__ , snake_case__ = [], []
snake_case__ = pred_logits.size(0 )
snake_case__ = pred_logits.size(1 )
snake_case__ , snake_case__ = pred_logits.topk(1 , dim=-1 , largest=UpperCamelCase , sorted=UpperCamelCase )
snake_case__ = preds_index.view(-1 , UpperCamelCase )[:, 1:]
snake_case__ = decoder(UpperCamelCase )
snake_case__ , snake_case__ = torch.nn.functional.softmax(UpperCamelCase , dim=2 ).max(dim=2 )
snake_case__ = preds_max_prob[:, 1:]
for index in range(UpperCamelCase ):
snake_case__ = preds_str[index].find(UpperCamelCase )
snake_case__ = preds_str[index][:pred_eos]
snake_case__ = preds_index[index].cpu().tolist()
snake_case__ = pred_index.index(UpperCamelCase ) if eos_token in pred_index else -1
snake_case__ = preds_max_prob[index][: pred_eos_index + 1]
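            # sequence confidence is the product of the per-step max
            # probabilities up to and including the end-of-sequence token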
snake_case__ = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(UpperCamelCase )
conf_scores.append(UpperCamelCase )
return dec_strs, conf_scores
def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: str ) -> int:
snake_case__ = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(UpperCamelCase )]
return decode_strs
def lowerCAmelCase_ ( self: int , UpperCamelCase: Optional[int] ) -> Dict:
return self.bpe_tokenizer.batch_decode(UpperCamelCase )
def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: str ) -> Union[str, Any]:
snake_case__ = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(UpperCamelCase )]
return decode_strs
| 328
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_attention_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_choices
def __UpperCAmelCase ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_attention_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=_a , )
return config, input_ids, attention_mask
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : str = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
)
if is_flax_available()
else ()
)
def __UpperCAmelCase ( self ):
__a = FlaxDistilBertModelTester(self )
@slow
def __UpperCAmelCase ( self ):
for model_class_name in self.all_model_classes:
__a = model_class_name.from_pretrained('''distilbert-base-uncased''' )
__a = model(np.ones((1, 1) ) )
self.assertIsNotNone(_a )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ):
__a = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
__a = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
__a = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
__a = model(_a , attention_mask=_a )[0]
__a = (1, 11, 768)
self.assertEqual(output.shape , _a )
__a = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _a , atol=1E-4 ) )
| 65
|
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = SpeechTaTokenizer
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Dict = True
def __UpperCAmelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
__a = SpeechTaTokenizer(_a )
__a = AddedToken('''<mask>''' , lstrip=_a , rstrip=_a )
__a = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self , _a ):
__a = '''this is a test'''
__a = '''this is a test'''
return input_text, output_text
def __UpperCAmelCase ( self , _a , _a=False , _a=20 , _a=5 ):
__a , __a = self.get_input_output_texts(_a )
__a = tokenizer.encode(_a , add_special_tokens=_a )
__a = tokenizer.decode(_a , clean_up_tokenization_spaces=_a )
return text, ids
def __UpperCAmelCase ( self ):
__a = '''<pad>'''
__a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __UpperCAmelCase ( self ):
__a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-4] , '''œ''' )
self.assertEqual(vocab_keys[-2] , '''<mask>''' )
self.assertEqual(vocab_keys[-1] , '''<ctc_blank>''' )
self.assertEqual(len(_a ) , 81 )
def __UpperCAmelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def __UpperCAmelCase ( self ):
__a = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__a = tokenizer.vocab_size
__a = len(_a )
self.assertNotEqual(_a , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__a = ['''aaaaa bbbbbb''', '''cccccccccdddddddd''']
__a = tokenizer.add_tokens(_a )
__a = tokenizer.vocab_size
__a = len(_a )
self.assertNotEqual(_a , 0 )
self.assertEqual(_a , _a )
self.assertEqual(_a , len(_a ) )
self.assertEqual(_a , all_size + len(_a ) )
__a = tokenizer.encode('''aaaaa bbbbbb low cccccccccdddddddd l''' , add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__a = {'''eos_token''': '''>>>>|||<||<<|<<''', '''pad_token''': '''<<<<<|||>|>>>>|>'''}
__a = tokenizer.add_special_tokens(_a )
__a = tokenizer.vocab_size
__a = len(_a )
self.assertNotEqual(_a , 0 )
self.assertEqual(_a , _a )
self.assertEqual(_a , len(_a ) )
self.assertEqual(_a , all_size_a + len(_a ) )
__a = tokenizer.encode(
'''>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l''' , add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a = self.get_tokenizer()
__a = tokenizer.tokenize('''This is a test''' )
# fmt: off
self.assertListEqual(_a , [SPIECE_UNDERLINE, '''T''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''a''', SPIECE_UNDERLINE, '''t''', '''e''', '''s''', '''t'''] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
__a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''92000''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
__a = tokenizer.convert_tokens_to_ids(_a )
# fmt: off
self.assertListEqual(_a , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
__a = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [SPIECE_UNDERLINE, '''I''', SPIECE_UNDERLINE, '''w''', '''a''', '''s''', SPIECE_UNDERLINE, '''b''', '''o''', '''r''', '''n''', SPIECE_UNDERLINE, '''i''', '''n''', SPIECE_UNDERLINE, '''<unk>''', ''',''', SPIECE_UNDERLINE, '''a''', '''n''', '''d''', SPIECE_UNDERLINE, '''t''', '''h''', '''i''', '''s''', SPIECE_UNDERLINE, '''i''', '''s''', SPIECE_UNDERLINE, '''f''', '''a''', '''l''', '''s''', '''é''', '''.'''] )
@slow
def __UpperCAmelCase ( self ):
# Use custom sequence because this tokenizer does not handle numbers.
__a = [
'''Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '''
'''general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '''
'''Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '''
'''models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.''',
'''BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '''
'''conditioning on both left and right context in all layers.''',
'''The quick brown fox jumps over the lazy dog.''',
]
# fmt: off
__a = {
'''input_ids''': [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''microsoft/speecht5_asr''' , revision='''c5ef64c71905caeccde0e4462ef3f9077224c524''' , sequences=_a , )
| 65
| 1
|
'''simple docstring'''
import base64
def base85_encode(string: str) -> bytes:
    return base64.a85encode(string.encode("""utf-8""" ) )
def base85_decode(a85encoded: bytes) -> str:
    return base64.a85decode(a85encoded).decode("""utf-8""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
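    # Illustrative round-trip (an added example): Ascii85-encode a string with
    # the helpers above and recover it unchanged.
    sample = "Hello World!"
    assert base85_decode(base85_encode(sample)) == sample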
| 585
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@property
def lowercase (self ) -> Optional[Any]:
torch.manual_seed(0 )
_snake_case = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def lowercase (self ) -> Dict:
_snake_case = self.dummy_uncond_unet
_snake_case = PNDMScheduler()
_snake_case = PNDMPipeline(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
pndm.to(UpperCAmelCase )
pndm.set_progress_bar_config(disable=UpperCAmelCase )
_snake_case = torch.manual_seed(0 )
_snake_case = pndm(generator=UpperCAmelCase , num_inference_steps=20 , output_type="""numpy""" ).images
_snake_case = torch.manual_seed(0 )
_snake_case = pndm(generator=UpperCAmelCase , num_inference_steps=20 , output_type="""numpy""" , return_dict=UpperCAmelCase )[0]
_snake_case = image[0, -3:, -3:, -1]
_snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_snake_case = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase (self ) -> Optional[Any]:
_snake_case = """google/ddpm-cifar10-32"""
_snake_case = UNetaDModel.from_pretrained(UpperCAmelCase )
_snake_case = PNDMScheduler()
_snake_case = PNDMPipeline(unet=UpperCAmelCase , scheduler=UpperCAmelCase )
pndm.to(UpperCAmelCase )
pndm.set_progress_bar_config(disable=UpperCAmelCase )
_snake_case = torch.manual_seed(0 )
_snake_case = pndm(generator=UpperCAmelCase , output_type="""numpy""" ).images
_snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_snake_case = np.array([0.1564, 0.1_4645, 0.1406, 0.1_4715, 0.1_2425, 0.1_4045, 0.1_3115, 0.1_2175, 0.125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 585
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
lowerCAmelCase = {
'vocab_file': {
'google/realm-cc-news-pretrained-embedder': (
'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'
),
'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/vocab.txt'
),
'google/realm-orqa-nq-openqa': 'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-nq-reader': 'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt',
'google/realm-orqa-wq-openqa': 'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt',
'google/realm-orqa-wq-reader': 'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt',
},
'tokenizer_file': {
'google/realm-cc-news-pretrained-embedder': (
            'https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-encoder': (
'https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-scorer': (
'https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'
),
'google/realm-cc-news-pretrained-openqa': (
            'https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-openqa': (
'https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-nq-reader': (
'https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-openqa': (
'https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'
),
'google/realm-orqa-wq-reader': (
'https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'
),
},
}
lowerCAmelCase = {
'google/realm-cc-news-pretrained-embedder': 512,
'google/realm-cc-news-pretrained-encoder': 512,
'google/realm-cc-news-pretrained-scorer': 512,
'google/realm-cc-news-pretrained-openqa': 512,
'google/realm-orqa-nq-openqa': 512,
'google/realm-orqa-nq-reader': 512,
'google/realm-orqa-wq-openqa': 512,
'google/realm-orqa-wq-reader': 512,
}
lowerCAmelCase = {
'google/realm-cc-news-pretrained-embedder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-encoder': {'do_lower_case': True},
'google/realm-cc-news-pretrained-scorer': {'do_lower_case': True},
'google/realm-cc-news-pretrained-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-openqa': {'do_lower_case': True},
'google/realm-orqa-nq-reader': {'do_lower_case': True},
'google/realm-orqa-wq-openqa': {'do_lower_case': True},
'google/realm-orqa-wq-reader': {'do_lower_case': True},
}
class _a ( UpperCamelCase__ ):
_lowercase : Union[str, Any] = VOCAB_FILES_NAMES
_lowercase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
_lowercase : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowercase : Optional[int] = RealmTokenizer
def __init__( self: Union[str, Any] , UpperCamelCase_: Union[str, Any]=None , UpperCamelCase_: Any=None , UpperCamelCase_: str=True , UpperCamelCase_: Union[str, Any]="[UNK]" , UpperCamelCase_: Dict="[SEP]" , UpperCamelCase_: Union[str, Any]="[PAD]" , UpperCamelCase_: Optional[int]="[CLS]" , UpperCamelCase_: int="[MASK]" , UpperCamelCase_: str=True , UpperCamelCase_: Optional[int]=None , **UpperCamelCase_: Dict , ) -> str:
"""simple docstring"""
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , )
lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , UpperCamelCase_ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , UpperCamelCase_ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , UpperCamelCase_ ) != tokenize_chinese_chars
):
lowercase__ = getattr(UpperCamelCase_ , normalizer_state.pop('''type''' ) )
lowercase__ = do_lower_case
lowercase__ = strip_accents
lowercase__ = tokenize_chinese_chars
lowercase__ = normalizer_class(**UpperCamelCase_ )
lowercase__ = do_lower_case
def lowerCamelCase_ ( self: Any , UpperCamelCase_: Any , **UpperCamelCase_: str ) -> Tuple:
"""simple docstring"""
lowercase__ = PaddingStrategy.MAX_LENGTH
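        # every candidate is padded to max_length so that each question's
        # candidate set stacks into one rectangular tensor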
lowercase__ = text
lowercase__ = kwargs.pop('''text_pair''' , UpperCamelCase_ )
lowercase__ = kwargs.pop('''return_tensors''' , UpperCamelCase_ )
lowercase__ = {
'''input_ids''': [],
'''attention_mask''': [],
'''token_type_ids''': [],
}
for idx, candidate_text in enumerate(UpperCamelCase_ ):
if batch_text_pair is not None:
lowercase__ = batch_text_pair[idx]
else:
lowercase__ = None
lowercase__ = super().__call__(UpperCamelCase_ , UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
lowercase__ = encoded_candidates.get('''input_ids''' )
lowercase__ = encoded_candidates.get('''attention_mask''' )
lowercase__ = encoded_candidates.get('''token_type_ids''' )
if encoded_input_ids is not None:
output_data["input_ids"].append(UpperCamelCase_ )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(UpperCamelCase_ )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(UpperCamelCase_ )
lowercase__ = {key: item for key, item in output_data.items() if len(UpperCamelCase_ ) != 0}
return BatchEncoding(UpperCamelCase_ , tensor_type=UpperCamelCase_ )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: str , UpperCamelCase_: List[Any]=None ) -> int:
"""simple docstring"""
lowercase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase_: List[int] , UpperCamelCase_: Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase_ ( self: int , UpperCamelCase_: str , UpperCamelCase_: Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
lowercase__ = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
| 429
|
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowerCAmelCase = {
'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'],
'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase = [
'GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXJapaneseForCausalLM',
'GPTNeoXJapaneseLayer',
'GPTNeoXJapaneseModel',
'GPTNeoXJapanesePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
lowerCAmelCase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 429
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
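        # Worked example (illustrative): with the default image_size=64 and the
        # backbone's output stride of 32, the feature map is 64 // 32 = 2 per side,
        # so num_patches = 2 ** 2 = 4 and seq_length = 4 + 1 = 5 with the [CLS] token.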
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=backbone_config, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as ViT hybrid does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"""{name}.{key}""" for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"""Parameter {name} of model {model_class} seems not properly initialized""", )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_loading(self):
        image_processor = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''')
        model = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''', device_map='''auto''')
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''')
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx], '''tabby, tabby cat''')
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('''roberta-base''', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, '''config.json''')) and os.path.isfile(
            os.path.join(dirpath, '''config.json''')
        ):
            os.remove(os.path.join(dirpath, '''config.json'''))
        if os.path.exists(os.path.join(dirpath, '''pytorch_model.bin''')) and os.path.isfile(
            os.path.join(dirpath, '''pytorch_model.bin''')
        ):
            os.remove(os.path.join(dirpath, '''pytorch_model.bin'''))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a categorical distribution `p` along its last dimension."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
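# Illustrative check (assumes the fixed `entropy` above): a uniform two-way
# distribution has entropy log(2):
#     entropy(torch.tensor([0.5, 0.5]))  # -> tensor(0.6931)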
def print_2d_tensor(tensor):
    """Print a 2D tensor, one logger line per layer."""
    logger.info('''lv, h >\t''' + '''\t'''.join(F'{x + 1}' for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(F'layer {row + 1}:\t' + '''\t'''.join(F'{x:.5f}' for x in tensor[row].cpu().data))
        else:
            logger.info(F'layer {row + 1}:\t' + '''\t'''.join(F'{x:d}' for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """This method shows how to compute: head attention entropy and importance score (loss gradient)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc='''Iteration''', disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1E-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info('''Attention entropies''')
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info('''Head importance scores''')
        print_2d_tensor(head_importance)
    logger.info('''Head ranked by importance scores''')
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
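# Note on the importance signal (summary, not original code): because `head_mask`
# multiplies each attention head's output, |d(loss)/d(mask_h)| measures how strongly
# the loss depends on head h. Accumulating `head_mask.grad.abs()` over the evaluation
# batches therefore yields the head-importance estimate of Michel et al. (2019),
# http://arxiv.org/abs/1905.10650.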
def mask_heads(args, model, eval_dataloader):
    """Mask heads (set them to zero) iteratively, based on head importance scores,
    as described in Michel et al. (http://arxiv.org/abs/1905.10650)."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downsteam score use the LM loss
    logger.info('''Pruning: original score: %f, threshold: %f''', original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('''Inf''')
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print('''BREAK BY num_to_mask''')
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info('''Heads to mask: %s''', str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            '''Masking: current score: %f, remaining heads %d (%.1f percents)''', current_score, new_head_mask.sum(), new_head_mask.sum() / new_head_mask.numel() * 100, )

    logger.info('''Final head mask''')
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, '''head_mask.npy'''), head_mask.detach().cpu().numpy())
    return head_mask
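# The loop above stops once `current_score` (here 1 / LM loss) drops below
# `masking_threshold * original_score`: each iteration zeroes the `masking_amount`
# least important heads, so masking proceeds only while the model stays within the
# tolerated quality budget, and the last mask that still satisfied it is returned.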
def prune_heads(args, model, eval_dataloader, head_mask):
    """Prune heads (remove the head weights entirely) based on the masking scores,
    as described in Michel et al. (http://arxiv.org/abs/1905.10650)."""
    # Pruning is like masking but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=None, actually_pruned=True, )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''', original_num_params, pruned_num_params, pruned_num_params / original_num_params * 100, )
    logger.info('''Pruning: score with masking: %f score with pruning: %f''', score_masking, score_pruning)
    logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''', original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--data_dir''', default=None, type=str, required=True, help='''The input data dir. Should contain the .tsv files (or other data files) for the task.''', )
    parser.add_argument(
        '''--model_name_or_path''', default=None, type=str, required=True, help='''Path to pretrained model or model identifier from huggingface.co/models''', )
    parser.add_argument(
        '''--output_dir''', default=None, type=str, required=True, help='''The output directory where the model predictions and checkpoints will be written.''', )
    # Other parameters
    parser.add_argument(
        '''--config_name''', default='''''', type=str, help='''Pretrained config name or path if not the same as model_name_or_path''', )
    parser.add_argument(
        '''--tokenizer_name''', default='''''', type=str, help='''Pretrained tokenizer name or path if not the same as model_name_or_path''', )
    parser.add_argument(
        '''--cache_dir''', default=None, type=str, help='''Where do you want to store the pre-trained models downloaded from s3''', )
    parser.add_argument(
        '''--data_subset''', type=int, default=-1, help='''If > 0: limit the data to a subset of data_subset instances.''')
    parser.add_argument(
        '''--overwrite_output_dir''', action='''store_true''', help='''Whether to overwrite data in output directory''')
    parser.add_argument(
        '''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
    parser.add_argument(
        '''--dont_normalize_importance_by_layer''', action='''store_true''', help='''Don\'t normalize importance score by layers''')
    parser.add_argument(
        '''--dont_normalize_global_importance''', action='''store_true''', help='''Don\'t normalize all importance scores between 0 and 1''', )
    parser.add_argument(
        '''--try_masking''', action='''store_true''', help='''Whether to try to mask head until a threshold of accuracy.''')
    parser.add_argument(
        '''--masking_threshold''', default=0.9, type=float, help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''', )
    parser.add_argument(
        '''--masking_amount''', default=0.1, type=float, help='''Amount to heads to masking at each masking step.''')
    parser.add_argument('''--metric_name''', default='''acc''', type=str, help='''Metric to use for head masking.''')
    parser.add_argument(
        '''--max_seq_length''', default=128, type=int, help=(
            '''The maximum total input sequence length after WordPiece tokenization. \n'''
            '''Sequences longer than this will be truncated, sequences shorter padded.'''
        ), )
    parser.add_argument('''--batch_size''', default=1, type=int, help='''Batch size.''')
    parser.add_argument('''--seed''', type=int, default=42)
    parser.add_argument('''--local_rank''', type=int, default=-1, help='''local_rank for distributed training on gpus''')
    parser.add_argument('''--no_cuda''', action='''store_true''', help='''Whether not to use CUDA when available''')
    parser.add_argument('''--server_ip''', type=str, default='''''', help='''Can be used for distant debugging.''')
    parser.add_argument('''--server_port''', type=str, default='''''', help='''Can be used for distant debugging.''')
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('''Waiting for debugger attach''')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''')
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device('''cuda''', args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend='''nccl''')  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, '''run_args.bin'''))
    logger.info('''Training/evaluation parameters %s''', args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)


if __name__ == "__main__":
    main()
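# Example invocation (illustrative; the script name and file paths below are
# placeholders, not from the original source):
#     python run_prune_gpt.py \
#         --model_name_or_path gpt2 \
#         --data_dir tokenized_ids.txt \
#         --output_dir ./pruned_gpt2 \
#         --try_masking --masking_threshold 0.9 --masking_amount 0.1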
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class FFT:
    """
    Fast Polynomial Multiplication using radix-2 fast Fourier Transform.
    """

    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()
    # Discrete fourier transform of A and B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]
    # Multiply the DFTs of A and B and find A*B
    def __multiply(self):
        dft_a = self.__dft('''A''')
        dft_b = self.__dft('''B''')
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root))
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = '''A = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A]))
        b = '''B = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B]))
        c = '''A*B = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for i, coef in enumerate(self.product))
        return f'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
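# Illustrative usage (assumes the FFT class above):
#     (1 + 2x) * (1 + 3x) = 1 + 5x + 6x^2; the coefficient lists are zero-padded to
#     the next power of two (here 2 ** ceil(log2(2 + 2 - 1)) = 4) before transforming:
#         FFT([1, 2], [1, 3]).product  # -> [(1+0j), (5+0j), (6+0j)]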
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt='''first prompt''', image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='''numpy''', ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt='''first prompt''', image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='''numpy''', ).images

        assert np.abs(image - new_image).sum() < 1E-5, "Models don't have the same forward pass"
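    # Design note (summary, not original code): the generator is reseeded with
    # manual_seed(0) before the second run so both pipelines sample identical noise;
    # only then is a near-zero image difference a valid save/load round-trip check.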
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = '''cyberpunk 2077'''
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='''numpy''', ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

        prompt = '''A painting of a squirrel eating a burger '''
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='''numpy''').images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1

        image = pipe.image_variation(init_image, generator=generator, output_type='''numpy''').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-1
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    '''Rename flax weight keys and reshape tensors to match the PyTorch layout.'''
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ('''weight''',)
    return flax_key_tuple, flax_tensor
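# Worked example (illustrative, hypothetical key): a 2-D linear kernel such as
# ("encoder", "layers_0", "mlp", "wo", "kernel") is renamed to
# ("encoder", "layers_0", "mlp", "wo", "weight") and its tensor transposed, matching
# the (out_features, in_features) layout that PyTorch linear layers expect.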
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split('''metadata''')
        curr_real_layer_name = ''''''.join(split_layer[0])[:-1]
        split_layer = [tuple(('''metadata''' + split_layer[1]).split('''/'''))]
    elif "kvstore" in layer:
        split_layer = layer.split('''kvstore''')
        curr_real_layer_name = ''''''.join(split_layer[0])[:-1]
        split_layer = [tuple(('''kvstore''' + split_layer[1]).split('''/'''))]
    else:
        split_layer = layer.split('''/''')
        curr_real_layer_name = '''/'''.join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f'{switch_checkpoint_path}/{checkpoint_info[layer]}'
    elif "kvstore/driver" in layer:
        content = '''file'''
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace('''/''', '''.''')] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + '''/checkpoint''', '''rb''') as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())['''optimizer''']['''target''']
        checkpoint_info = flatten_dict(checkpoint_info, sep='''/''')
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path)
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split('''/''')), raw_weights)
        key = '''/'''.join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace('''.bin''', f'-{len(sharded_state_dicts)+1:05d}-of-???.bin'))
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace('''.bin''', f'-{len(sharded_state_dicts)+1:05d}-of-???.bin'))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            '''.bin''', f'-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin')
        temp_filename = os.path.join(dump_path, weights_name.replace('''.bin''', f'-{idx+1:05d}-of-???.bin'))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {'''total_size''': total_size}
    index = {'''metadata''': metadata, '''weight_map''': weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), '''w''', encoding='''utf-8''') as f:
        content = json.dumps(index, indent=2, sort_keys=True) + '''\n'''
        f.write(content)

    return metadata, index
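# The resulting index mirrors the standard sharded-checkpoint layout (sketch; the
# parameter name shown is hypothetical):
#     {
#       "metadata": {"total_size": <bytes>},
#       "weight_map": {"encoder.block.0. ... .weight": "pytorch_model-00001-of-000NN.bin", ...}
#     }
# where each parameter name points at the shard file that stores it.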
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
args = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained('''google/switch-base-8''')
    config.save_pretrained('''/home/arthur_huggingface_co/transformers/switch_converted''')
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        '''/home/arthur_huggingface_co/transformers/switch_converted''', device_map='''auto''')
    tokenizer = T5Tokenizer.from_pretrained('''t5-small''')
    text = '''A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.'''
    input_ids = tokenizer(text, return_tensors='''pt''').input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
"""simple docstring"""
import os
from collections.abc import Iterator
def lowerCAmelCase ( UpperCamelCase_: str = "." ) -> Iterator[str]:
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(UpperCamelCase_ ):
_a = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(UpperCamelCase_ )[1] in (".py", ".ipynb"):
yield os.path.join(UpperCamelCase_ , UpperCamelCase_ ).lstrip("./" )
def lowerCAmelCase ( UpperCamelCase_: Any ) -> Union[str, Any]:
'''simple docstring'''
return f'''{i * ' '}*''' if i else "\n##"
def lowerCAmelCase ( UpperCamelCase_: str , UpperCamelCase_: str ) -> str:
'''simple docstring'''
_a = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(UpperCamelCase_ ) or old_parts[i] != new_part) and new_part:
print(f'''{md_prefix(UpperCamelCase_ )} {new_part.replace('_' , ' ' ).title()}''' )
return new_path
def lowerCAmelCase ( UpperCamelCase_: str = "." ) -> None:
'''simple docstring'''
_a = ""
for filepath in sorted(good_file_paths(UpperCamelCase_ ) ):
_a , _a = os.path.split(UpperCamelCase_ )
if filepath != old_path:
_a = print_path(UpperCamelCase_ , UpperCamelCase_ )
_a = (filepath.count(os.sep ) + 1) if filepath else 0
_a = f'''{filepath}/{filename}'''.replace(" " , "%20" )
_a = os.path.splitext(filename.replace("_" , " " ).title() )[0]
print(f'''{md_prefix(UpperCamelCase_ )} [{filename}]({url})''' )
if __name__ == "__main__":
print_directory_md(""".""")
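# Illustrative output shape (hypothetical file tree): for "sorts/bubble_sort.py" the
# script prints something like
#
#     ## Sorts
#       * [Bubble Sort](sorts/bubble_sort.py)
#
# i.e. a "##" heading per directory level and an indented, title-cased link per file.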
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False

try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("""TORCH_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """torch"""))
    )

default_cache_path = os.path.join(torch_cache_home, """transformers""")

CLOUDFRONT_DISTRIB_PREFIX = """https://cdn.huggingface.co"""
S3_BUCKET_PREFIX = """https://s3.amazonaws.com/models.huggingface.co/bert"""
PATH = """/""".join(str(Path(__file__).resolve()).split("""/""")[:-1])
CONFIG = os.path.join(PATH, """config.yaml""")
ATTRIBUTES = os.path.join(PATH, """attributes.txt""")
OBJECTS = os.path.join(PATH, """objects.txt""")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("""PYTORCH_PRETRAINED_BERT_CACHE""", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("""PYTORCH_TRANSFORMERS_CACHE""", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("""TRANSFORMERS_CACHE""", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = """pytorch_model.bin"""
CONFIG_NAME = """config.yaml"""
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d
def __repr__( self ) ->Optional[int]:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
    def to_dict(self):
        return self._pointer
    def dump_yaml(self, data, file_name):
        with open(f'''{file_name}''', "w") as stream:
            dump(data, stream)
    def dump_json(self, data, file_name):
        with open(f'''{file_name}''', "w") as stream:
            json.dump(data, stream)
    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data
    def __str__(self):
        t = "    "
        if self._name != "root":
            r = f'''{t * (self._level-1)}{self._name}:\n'''
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f'''{t * (self._level)}{v}\n'''
                self._level += 1
            else:
                r += f'''{t * (self._level)}{k}: {v} ({type(v).__name__})\n'''
            self._level = level
        return r[:-1]
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)
    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f'''{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %'''
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f'''{endpoint}/{model_id}-{filename}'''
    else:
        return f'''{endpoint}/{model_id}/{filename}'''
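# Worked example (illustrative model ids): a legacy flat id has no "/" and maps to
#     hf_bucket_url("bert-base-uncased", "pytorch_model.bin")
#       -> "https://cdn.huggingface.co/bert-base-uncased-pytorch_model.bin"
# while a namespaced id keeps a directory-style layout:
#     hf_bucket_url("user/model", "pytorch_model.bin")
#       -> "https://cdn.huggingface.co/user/model/pytorch_model.bin"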
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading", )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent=None, local_files_only=False, ):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False.")
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name, )

            http_get(
                url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent, )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"
    return filename
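# Illustrative result: the cache filename is sha256(url), with ".sha256(etag)"
# appended when an ETag is available, so a changed ETag (a new upstream file)
# yields a fresh cache entry instead of silently reusing a stale one.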
def cached_path(
    url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent=None, extract_compressed_file=False, force_extract=False, local_files_only=False, ):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, user_agent=user_agent, local_files_only=local_files_only, )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path
def lowerCAmelCase ( UpperCamelCase_: str , UpperCamelCase_: str="," ) -> Tuple:
'''simple docstring'''
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if os.path.isfile(UpperCamelCase_ ):
with open(UpperCamelCase_ ) as f:
_a = eval(f.read() )
else:
_a = requests.get(UpperCamelCase_ )
try:
_a = requests.json()
except Exception:
_a = req.content.decode()
assert data is not None, "could not connect"
try:
_a = eval(UpperCamelCase_ )
except Exception:
_a = data.split("\n" )
req.close()
return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def get_demo_path():
    print(f'''{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb''')
def lowerCAmelCase ( UpperCamelCase_: List[str] , UpperCamelCase_: List[str]="RGB" ) -> List[Any]:
'''simple docstring'''
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
if os.path.isfile(UpperCamelCase_ ):
_a = cva.imread(UpperCamelCase_ )
else:
_a = get_image_from_url(UpperCamelCase_ )
assert img is not None, f'''could not connect to: {im}'''
_a = cva.cvtColor(UpperCamelCase_ , cva.COLOR_BGR2RGB )
if input_format == "RGB":
_a = img[:, :, ::-1]
return img
def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
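# Illustrative usage of `chunk` (a lazy batching generator):
#     list(chunk([1, 2, 3, 4, 5], batch=2))  # -> [[1, 2], [3, 4], [5]]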
"""simple docstring"""
class SubArray:
    def __init__(self, arr):
        # we need a list, not a string, so split the comma-separated input
        self.array = arr.split(""",""")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input('please input some numbers:')
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(('the results is:', re))
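# Worked example (illustrative): for the input "1,-2,3,4", sum_value evolves as
# [1, -1, 3, 7] (Kadane's best sum ending at each index) and rear as [1, 1, 3, 7]
# (best seen so far), so solve_sub_array() returns 7 for the subarray [3, 4].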
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
    'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
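# Example satisfying these checks (weights quoted from the DistilBERT recipe, from memory,
# so treat them as an illustration rather than canonical values):
# --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_cos 1.0 --alpha_mse 0.0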
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="""Training""" )
parser.add_argument("""--force""" ,action="""store_true""" ,help="""Overwrite dump_path if it already exists.""" )
parser.add_argument(
"""--dump_path""" ,type=_snake_case ,required=_snake_case ,help="""The output directory (log, checkpoints, parameters, etc.)""" )
parser.add_argument(
"""--data_file""" ,type=_snake_case ,required=_snake_case ,help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" ,)
parser.add_argument(
"""--student_type""" ,type=_snake_case ,choices=["""distilbert""", """roberta""", """gpt2"""] ,required=_snake_case ,help="""The student type (DistilBERT, RoBERTa).""" ,)
parser.add_argument("""--student_config""" ,type=_snake_case ,required=_snake_case ,help="""Path to the student configuration.""" )
parser.add_argument(
"""--student_pretrained_weights""" ,default=_snake_case ,type=_snake_case ,help="""Load student initialization checkpoint.""" )
parser.add_argument(
"""--teacher_type""" ,choices=["""bert""", """roberta""", """gpt2"""] ,required=_snake_case ,help="""Teacher type (BERT, RoBERTa).""" )
parser.add_argument("""--teacher_name""" ,type=_snake_case ,required=_snake_case ,help="""The teacher model.""" )
parser.add_argument("""--temperature""" ,default=2.0 ,type=_snake_case ,help="""Temperature for the softmax temperature.""" )
parser.add_argument(
"""--alpha_ce""" ,default=0.5 ,type=_snake_case ,help="""Linear weight for the distillation loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_mlm""" ,default=0.0 ,type=_snake_case ,help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" ,)
parser.add_argument("""--alpha_clm""" ,default=0.5 ,type=_snake_case ,help="""Linear weight for the CLM loss. Must be >=0.""" )
parser.add_argument("""--alpha_mse""" ,default=0.0 ,type=_snake_case ,help="""Linear weight of the MSE loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_cos""" ,default=0.0 ,type=_snake_case ,help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
parser.add_argument(
"""--mlm""" ,action="""store_true""" ,help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
parser.add_argument(
"""--mlm_mask_prop""" ,default=0.15 ,type=_snake_case ,help="""Proportion of tokens for which we need to make a prediction.""" ,)
parser.add_argument("""--word_mask""" ,default=0.8 ,type=_snake_case ,help="""Proportion of tokens to mask out.""" )
parser.add_argument("""--word_keep""" ,default=0.1 ,type=_snake_case ,help="""Proportion of tokens to keep.""" )
parser.add_argument("""--word_rand""" ,default=0.1 ,type=_snake_case ,help="""Proportion of tokens to randomly replace.""" )
parser.add_argument(
"""--mlm_smoothing""" ,default=0.7 ,type=_snake_case ,help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" ,)
parser.add_argument("""--token_counts""" ,type=_snake_case ,help="""The token counts in the data_file for MLM.""" )
parser.add_argument(
"""--restrict_ce_to_mask""" ,action="""store_true""" ,help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" ,)
parser.add_argument(
"""--freeze_pos_embs""" ,action="""store_true""" ,help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" ,)
parser.add_argument(
"""--freeze_token_type_embds""" ,action="""store_true""" ,help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" ,)
parser.add_argument("""--n_epoch""" ,type=_snake_case ,default=3 ,help="""Number of pass on the whole dataset.""" )
parser.add_argument("""--batch_size""" ,type=_snake_case ,default=5 ,help="""Batch size (for each process).""" )
parser.add_argument(
"""--group_by_size""" ,action="""store_false""" ,help="""If true, group sequences that have similar length into the same batch. Default is true.""" ,)
parser.add_argument(
"""--gradient_accumulation_steps""" ,type=_snake_case ,default=50 ,help="""Gradient accumulation for larger training batches.""" ,)
parser.add_argument("""--warmup_prop""" ,default=0.05 ,type=_snake_case ,help="""Linear warmup proportion.""" )
parser.add_argument("""--weight_decay""" ,default=0.0 ,type=_snake_case ,help="""Weight decay if we apply some.""" )
parser.add_argument("""--learning_rate""" ,default=5E-4 ,type=_snake_case ,help="""The initial learning rate for Adam.""" )
parser.add_argument("""--adam_epsilon""" ,default=1E-6 ,type=_snake_case ,help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" ,default=5.0 ,type=_snake_case ,help="""Max gradient norm.""" )
parser.add_argument("""--initializer_range""" ,default=0.02 ,type=_snake_case ,help="""Random initialization range.""" )
parser.add_argument(
"""--fp16""" ,action="""store_true""" ,help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" ,)
parser.add_argument(
"""--fp16_opt_level""" ,type=_snake_case ,default="""O1""" ,help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) ,)
parser.add_argument("""--n_gpu""" ,type=_snake_case ,default=1 ,help="""Number of GPUs in the node.""" )
parser.add_argument("""--local_rank""" ,type=_snake_case ,default=-1 ,help="""Distributed training - Local rank""" )
parser.add_argument("""--seed""" ,type=_snake_case ,default=56 ,help="""Random seed""" )
parser.add_argument("""--log_interval""" ,type=_snake_case ,default=500 ,help="""Tensorboard logging interval.""" )
parser.add_argument("""--checkpoint_interval""" ,type=_snake_case ,default=4_000 ,help="""Checkpoint interval.""" )
    args = parser.parse_args()
    sanity_checks(args )
    # ARGS #
    init_gpu_params(args )
    set_seed(args )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    f'''Serialization dir {args.dump_path} already exists, but you have not specified whether'''
                    """ to overwrite it. Use `--force` if you want to overwrite it.""" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(f'''Param: {args}''' )
with open(os.path.join(args.dump_path ,"""parameters.json""" ) ,"""w""" ) as f:
            json.dump(vars(args ) , f , indent=4 )
git_log(args.dump_path )
    student_config_class , student_model_class , _ = MODEL_CLASSES[args.student_type]
    teacher_config_class , teacher_model_class , teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name )
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol )
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f'''Special tokens {special_tok_ids}''' )
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f'''Loading data from {args.data_file}''' )
    with open(args.data_file ,"""rb""" ) as fp:
        data = pickle.load(fp )
    if args.mlm:
        logger.info(f'''Loading token counts from {args.token_counts} (already pre-computed)''' )
        with open(args.token_counts ,"""rb""" ) as fp:
            counts = pickle.load(fp )
        token_probs = np.maximum(counts ,1 ) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs )
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args ,data=data )
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(f'''Loading student config from {args.student_config}''' )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config )
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(f'''Loading pretrained weights from {args.student_pretrained_weights}''' )
        student = student_model_class.from_pretrained(args.student_pretrained_weights ,config=stu_architecture_config )
    else:
        student = student_model_class(stu_architecture_config )
    if args.n_gpu > 0:
        student.to(f'''cuda:{args.local_rank}''' )
    logger.info("""Student loaded.""" )
    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name ,output_hidden_states=True )
    if args.n_gpu > 0:
        teacher.to(f'''cuda:{args.local_rank}''' )
    logger.info(f'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student , args )
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student , args )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args ,dataset=train_lm_seq_dataset ,token_probs=token_probs ,student=student ,teacher=teacher )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
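# Example invocation (hypothetical paths, sketch only — adjust to your own data and configs):
# python train.py --student_type distilbert --student_config training_configs/distilbert.json \
#     --teacher_type bert --teacher_name bert-base-uncased \
#     --mlm --alpha_ce 0.5 --alpha_mlm 0.5 --alpha_clm 0.0 \
#     --dump_path serialization_dir/my_first_distillation \
#     --data_file data/binarized_text.pickle --token_counts data/token_counts.pickle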
| 223
| 1
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps , max_beta=0.999 , alpha_transform_type="cosine" , ):
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(f'Unsupported alpha_transform_type: {alpha_transform_type}' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
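# Note: betas_for_alpha_bar(1000) returns a length-1000 float32 tensor following the
# cosine schedule of Nichol & Dhariwal, with each beta_t = 1 - alpha_bar(t2)/alpha_bar(t1)
# clipped at max_beta so a single step never fully destroys the signal.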
class HeunDiscreteScheduler(SchedulerMixin , ConfigMixin):
    """simple docstring"""
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__( self , num_train_timesteps = 1000 , beta_start = 0.00085 , beta_end = 0.012 , beta_schedule = "linear" , trained_betas = None , prediction_type = "epsilon" , use_karras_sigmas = False , clip_sample = False , clip_sample_range = 1.0 , timestep_spacing = "linspace" , steps_offset = 0 , ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas , dtype=torch.float32 )
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start , beta_end , num_train_timesteps , dtype=torch.float32 )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , num_train_timesteps , dtype=torch.float32 ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps , alpha_transform_type="cosine" )
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps , alpha_transform_type="exp" )
        else:
            raise NotImplementedError(f'{beta_schedule} is not implemented for {self.__class__}' )
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas , dim=0 )
        # set all values
        self.set_timesteps(num_train_timesteps , None , num_train_timesteps )
        self.use_karras_sigmas = use_karras_sigmas
    def index_for_timestep( self , timestep , schedule_timesteps=None ):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps
        indices = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            pos = 1 if len(indices ) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
            pos = self._index_counter[timestep_int]
        return indices[pos].item()
@property
    def init_noise_sigma( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input( self , sample , timestep , ):
        step_index = self.index_for_timestep(timestep )
        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps( self , num_inference_steps , device = None , num_train_timesteps = None , ):
        self.num_inference_steps = num_inference_steps
        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0 , num_train_timesteps - 1 , num_inference_steps , dtype=float )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0 , num_inference_steps ) * step_ratio).round()[::-1].copy().astype(float )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps , 0 , -step_ratio )).round().copy().astype(float )
            timesteps -= 1
        else:
            raise ValueError(
                f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' )
        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        log_sigmas = np.log(sigmas )
        sigmas = np.interp(timesteps , np.arange(0 , len(sigmas ) ) , sigmas )
        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas , num_inference_steps=self.num_inference_steps )
            timesteps = np.array([self._sigma_to_t(sigma , log_sigmas ) for sigma in sigmas] )
        sigmas = np.concatenate([sigmas, [0.0]] ).astype(np.float32 )
        sigmas = torch.from_numpy(sigmas ).to(device=device )
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
        timesteps = torch.from_numpy(timesteps )
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
        if str(device ).startswith("mps" ):
            # mps does not support float64
            self.timesteps = timesteps.to(device , dtype=torch.float32 )
        else:
            self.timesteps = timesteps.to(device=device )
        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int )
    def _sigma_to_t( self , sigma , log_sigmas ):
        # get log sigma
        log_sigma = np.log(sigma )
        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # get sigmas range
        low_idx = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w , 0 , 1 )
        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape )
        return t
    def _convert_to_karras( self , in_sigmas , num_inference_steps ):
        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0 , 1 , num_inference_steps )
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
@property
    def state_in_first_order( self ):
return self.dt is None
    def step( self , model_output , timestep , sample , return_dict = True , ):
        step_index = self.index_for_timestep(timestep )
        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' )
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat
            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2
            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample
            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None
        prev_sample = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample )
    def add_noise( self , original_samples , noise , timesteps , ):
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps ):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device , dtype=torch.float32 )
            timesteps = timesteps.to(original_samples.device , dtype=torch.float32 )
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device )
            timesteps = timesteps.to(original_samples.device )
        step_indices = [self.index_for_timestep(t , schedule_timesteps ) for t in timesteps]
        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            sigma = sigma.unsqueeze(-1 )
        noisy_samples = original_samples + noise * sigma
        return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
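# Minimal denoising-loop sketch (assumed usage; `unet` and `shape` are hypothetical):
# scheduler = HeunDiscreteScheduler(beta_schedule="linear")
# scheduler.set_timesteps(num_inference_steps=25)
# sample = torch.randn(shape) * scheduler.init_noise_sigma
# for t in scheduler.timesteps:  # timesteps are duplicated: Heun runs two model calls per step
#     model_input = scheduler.scale_model_input(sample, t)
#     model_output = unet(model_input, t).sample
#     sample = scheduler.step(model_output, t, sample).prev_sample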
| 718
|
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase ):
    """simple docstring"""
    @require_torch
    def test_small_model_pt(self ):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification" , model="hf-internal-testing/tiny-clap-htsat-unfused" )
        dataset = load_dataset("ashraq/esc50" )
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio , candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] )
        self.assertEqual(
            nested_simplify(output ) , [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vacuum cleaner"}] , )
    @unittest.skip("No models are available in TF" )
    def test_small_model_tf(self ):
        pass
    @slow
    @require_torch
    def test_large_model_pt(self ):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification" , model="laion/clap-htsat-unfused" , )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50" )
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio , candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] )
        self.assertEqual(
            nested_simplify(output ) , [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vacuum cleaner"},
            ] , )
        output = audio_classifier([audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5 , )
        output = audio_classifier(
            [audio] * 5 , candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"] , batch_size=5 )
        self.assertEqual(
            nested_simplify(output ) , [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vacuum cleaner"},
                ],
            ]
            * 5 , )
    @unittest.skip("No models are available in TF" )
    def test_large_model_tf(self ):
        pass
| 608
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor( metaclass=DummyObject ):
    """simple docstring"""
    _backends = ['''note_seq''']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['note_seq'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['note_seq'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['note_seq'] )
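# Behavior sketch: with the `note_seq` package missing, instantiating this dummy (or calling
# its from_config/from_pretrained classmethods) makes `requires_backends` raise an ImportError
# that names the missing dependency; the class is otherwise inert.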
| 229
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
"""google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
"""google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig ):
    """simple docstring"""
    model_type = '''mobilenet_v2'''
    def __init__( self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=32 , first_layer_is_expansion=True , finegrained_output=True , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.8 , initializer_range=0.02 , layer_norm_eps=0.001 , semantic_loss_ignore_index=255 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.' )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class MobileNetV2OnnxConfig(OnnxConfig ):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse('''1.11''' )
    @property
    def inputs( self ):
        '''simple docstring'''
        return OrderedDict([('pixel_values', {0: 'batch'})] )
    @property
    def outputs( self ):
        '''simple docstring'''
        if self.task == "image-classification":
            return OrderedDict([('logits', {0: 'batch'})] )
        else:
            return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
    @property
    def atol_for_validation( self ):
        '''simple docstring'''
        return 1E-4
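# Usage sketch (assumed import path in transformers):
# from transformers import MobileNetV2Config, MobileNetV2Model
# config = MobileNetV2Config(depth_multiplier=1.4, image_size=224)
# model = MobileNetV2Model(config)  # randomly initialised weights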
| 229
| 1
|
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase ):
    def _get_tensors(self , length ):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores
    def test_list_criteria(self ):
        input_ids, scores = self._get_tensors(5 )
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_length_criteria(self ):
        criteria = MaxLengthCriteria(max_length=10 )
        input_ids, scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_new_tokens_criteria(self ):
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        input_ids, scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids, scores = self._get_tensors(10 )
        self.assertTrue(criteria(input_ids , scores ) )
        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 10 )
    def test_max_time_criteria(self ):
        input_ids, scores = self._get_tensors(5 )
        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )
        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_validate_stopping_criteria(self ):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
        with self.assertWarns(UserWarning ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 11 )
        self.assertEqual(len(stopping_criteria ) , 1 )
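# Note on the arithmetic exercised above: MaxNewTokensCriteria(start_length=5,
# max_new_tokens=5) stops once the sequence reaches 5 + 5 = 10 tokens, which is why the
# length-9 tensors pass and the length-10 tensors trigger the criterion.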
| 440
|
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowercase__ ( unittest.TestCase ):
__UpperCamelCase = inspect.getfile(accelerate.test_utils )
__UpperCamelCase = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_cli.py"""] )
__UpperCamelCase = ["""accelerate""", """launch"""]
__UpperCamelCase = Path.home() / """.cache/huggingface/accelerate"""
__UpperCamelCase = """default_config.yaml"""
__UpperCamelCase = config_folder / config_file
__UpperCamelCase = config_folder / """_default_config.yaml"""
__UpperCamelCase = Path("""tests/test_configs""" )
@classmethod
def UpperCAmelCase__ ( cls ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def UpperCAmelCase__ ( cls ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def UpperCAmelCase__ ( self ):
        cmd = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def UpperCAmelCase__ ( self ):
for config in sorted(self.test_config_path.glob("""**/*.yaml""" ) ):
            with self.subTest(config_file=config ):
                execute_subprocess_async(
                    self.base_cmd + ["""--config_file""", str(config ), self.test_file_path] , env=os.environ.copy() )
def UpperCAmelCase__ ( self ):
execute_subprocess_async(["""accelerate""", """test"""] , env=os.environ.copy() )
class lowercase__ ( unittest.TestCase ):
__UpperCamelCase = """test-tpu"""
__UpperCamelCase = """us-central1-a"""
__UpperCamelCase = """ls"""
__UpperCamelCase = ["""accelerate""", """tpu-config"""]
__UpperCamelCase = """cd /usr/share"""
__UpperCamelCase = """tests/test_samples/test_command_file.sh"""
__UpperCamelCase = """Running gcloud compute tpus tpu-vm ssh"""
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : str = run_command(
self.cmd
+ ["""--command""", self.command, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug"""] , return_stdout=_lowercase , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , _lowercase , )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : str = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/0_12_0.yaml""",
"""--command""",
self.command,
"""--tpu_zone""",
self.tpu_zone,
"""--tpu_name""",
self.tpu_name,
"""--debug""",
] , return_stdout=_lowercase , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , _lowercase , )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Optional[Any] = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--debug"""] , return_stdout=_lowercase )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , _lowercase , )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Tuple = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command""", self.command, """--debug"""] , return_stdout=_lowercase , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all' , _lowercase , )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : str = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/latest.yaml""",
"""--command""",
self.command,
"""--command""",
"""echo \"Hello World\"""",
"""--debug""",
] , return_stdout=_lowercase , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all' , _lowercase , )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : str = run_command(
self.cmd
+ ["""--config_file""", """tests/test_configs/latest.yaml""", """--command_file""", self.command_file, """--debug"""] , return_stdout=_lowercase , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , _lowercase , )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Optional[Any] = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/0_12_0.yaml""",
"""--command_file""",
self.command_file,
"""--tpu_zone""",
self.tpu_zone,
"""--tpu_name""",
self.tpu_name,
"""--debug""",
] , return_stdout=_lowercase , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all' , _lowercase , )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Tuple = run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--install_accelerate""", """--debug"""] , return_stdout=_lowercase , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all' , _lowercase , )
def UpperCAmelCase__ ( self ):
lowerCAmelCase_ : Any = run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/latest.yaml""",
"""--install_accelerate""",
"""--accelerate_version""",
"""12.0.0""",
"""--debug""",
] , return_stdout=_lowercase , )
self.assertIn(
F'{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all' , _lowercase , )
| 440
| 1
|
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum ):
    ALL_CHECKS = 'all_checks'
    BASIC_CHECKS = 'basic_checks'
    NO_CHECKS = 'no_checks'
class ChecksumVerificationException(Exception ):
    pass
class UnexpectedDownloadedFile(ChecksumVerificationException ):
    pass
class ExpectedMoreDownloadedFiles(ChecksumVerificationException ):
    pass
class NonMatchingChecksumError(ChecksumVerificationException ):
    pass
def verify_checksums( expected_checksums , recorded_checksums , verification_name=None ):
    if expected_checksums is None:
        logger.info("Unable to verify checksums." )
        return
    if len(set(expected_checksums ) - set(recorded_checksums ) ) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums ) - set(recorded_checksums ) ) )
    if len(set(recorded_checksums ) - set(expected_checksums ) ) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums ) - set(expected_checksums ) ) )
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls ) > 0:
        raise NonMatchingChecksumError(
            f'Checksums didn\'t match{for_verification_name}:\n'
            f'{bad_urls}\n'
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
    logger.info("All the checksums matched successfully" + for_verification_name )
class SplitsVerificationException(Exception ):
    pass
class UnexpectedSplits(SplitsVerificationException ):
    pass
class ExpectedMoreSplits(SplitsVerificationException ):
    pass
class NonMatchingSplitsSizesError(SplitsVerificationException ):
    pass
def verify_splits( expected_splits , recorded_splits ):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes." )
        return
    if len(set(expected_splits ) - set(recorded_splits ) ) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits ) - set(recorded_splits ) ) )
    if len(set(recorded_splits ) - set(expected_splits ) ) > 0:
        raise UnexpectedSplits(str(set(recorded_splits ) - set(expected_splits ) ) )
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits ) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits ) )
    logger.info("All the splits matched successfully." )
def get_size_checksum_dict( path , record_checksum = True ):
    if record_checksum:
        m = sha256()
        with open(path , "rb" ) as f:
            for chunk in iter(lambda: f.read(1 << 2_0 ) , b"" ):
                m.update(chunk )
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path ), "checksum": checksum}
def is_small_dataset( dataset_size ):
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
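# Example (sketch, with an assumed config value): if config.IN_MEMORY_MAX_SIZE is
# 250 << 20 (250 MiB), then is_small_dataset(100 << 20) is True and the dataset may be
# loaded in memory, while is_small_dataset(500 << 20) is False.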
| 463
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_funnel import FunnelTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
_model_names = [
'''small''',
'''small-base''',
'''medium''',
'''medium-base''',
'''intermediate''',
'''intermediate-base''',
'''large''',
'''large-base''',
'''xlarge''',
'''xlarge-base''',
]
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''',
'''funnel-transformer/small-base''': (
'''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''',
'''funnel-transformer/medium-base''': (
'''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''',
'''funnel-transformer/large-base''': (
'''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json'''
),
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''',
'''funnel-transformer/xlarge-base''': (
'''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {F"""funnel-transformer/{name}""": 5_1_2 for name in _model_names}
PRETRAINED_INIT_CONFIGURATION = {F"""funnel-transformer/{name}""": {'''do_lower_case''': True} for name in _model_names}
class FunnelTokenizerFast(PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = FunnelTokenizer
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    cls_token_type_id = 2
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , bos_token="<s>" , eos_token="</s>" , clean_text=True , tokenize_chinese_chars=True , strip_accents=None , wordpieces_prefix="##" , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , bos_token=bos_token , eos_token=eos_token , clean_text=clean_text , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , wordpieces_prefix=wordpieces_prefix , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0]
        return len(cls ) * [self.cls_token_type_id] + len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
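# Note: Funnel Transformer reserves token type id 2 for [CLS] (cls_token_type_id above),
# so for a single sequence create_token_type_ids_from_sequences yields [2, 0, ..., 0]
# rather than the all-zeros vector BERT-style tokenizers produce.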
| 463
| 1
|
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
"gwf-440k": {
"url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
"sample_rate": 4_8000,
"sample_size": 6_5536,
},
"jmann-small-190k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
"sample_rate": 4_8000,
"sample_size": 6_5536,
},
"jmann-large-580k": {
"url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
"sample_rate": 4_8000,
"sample_size": 13_1072,
},
"maestro-uncond-150k": {
"url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
"sample_rate": 1_6000,
"sample_size": 6_5536,
},
"unlocked-uncond-250k": {
"url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
"sample_rate": 1_6000,
"sample_size": 6_5536,
},
"honk-140k": {
"url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
"sample_rate": 1_6000,
"sample_size": 6_5536,
},
}
def alpha_sigma_to_t( alpha , sigma ):
    '''simple docstring'''
    return torch.atan2(sigma , alpha ) / math.pi * 2
def get_crash_schedule( t ):
    '''simple docstring'''
    sigma = torch.sin(t * math.pi / 2 ) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha , sigma )
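# The "crash" schedule above maps a uniform t in [0, 1] to a noise level
# sigma = sin(pi*t/2)^2 and signal level alpha = sqrt(1 - sigma^2), then converts the pair
# back to a timestep via atan2(sigma, alpha) / (pi / 2).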
class Object(object ):
    pass
class DiffusionUncond(nn.Module ):
    def __init__( self , global_args ):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args , n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1 , scramble=True)
def download( model_name ):
    '''simple docstring'''
    url = MODELS_MAP[model_name]['''url''']
    os.system(f'wget {url} ./' )
    return f'./{model_name}.ckpt'
DOWN_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
}
UP_NUM_TO_LAYER = {
"8": "resnets.0",
"9": "attentions.0",
"10": "resnets.1",
"11": "attentions.1",
"12": "resnets.2",
"13": "attentions.2",
}
MID_NUM_TO_LAYER = {
"1": "resnets.0",
"2": "attentions.0",
"3": "resnets.1",
"4": "attentions.1",
"5": "resnets.2",
"6": "attentions.2",
"8": "resnets.3",
"9": "attentions.3",
"10": "resnets.4",
"11": "attentions.4",
"12": "resnets.5",
"13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
"0": "resnets.0",
"1": "resnets.1",
"2": "resnets.2",
"4": "resnets.0",
"5": "resnets.1",
"6": "resnets.2",
}
RES_CONV_MAP = {
"skip": "conv_skip",
"main.0": "conv_1",
"main.1": "group_norm_1",
"main.3": "conv_2",
"main.4": "group_norm_2",
}
ATTN_MAP = {
"norm": "group_norm",
"qkv_proj": ["query", "key", "value"],
"out_proj": ["proj_attn"],
}
def convert_resconv_naming( name ):
    '''simple docstring'''
    if name.startswith('''skip''' ):
        return name.replace('''skip''' , RES_CONV_MAP['''skip'''] )
    # name has to be of format main.{digit}
    if not name.startswith('''main.''' ):
        raise ValueError(f'ResConvBlock error with {name}' )
    return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming( name ):
    '''simple docstring'''
    for key, value in ATTN_MAP.items():
        if name.startswith(key ) and not isinstance(value , list ):
            return name.replace(key , value )
        elif name.startswith(key ):
            return [name.replace(key , v ) for v in value]
    raise ValueError(f'Attn error with {name}' )
def rename( input_string , max_depth=13 ):
    '''simple docstring'''
    string = input_string
    if string.split('''.''' )[0] == "timestep_embed":
        return string.replace('''timestep_embed''' , '''time_proj''' )
    depth = 0
    if string.startswith('''net.3.''' ):
        depth += 1
        string = string[6:]
    elif string.startswith('''net.''' ):
        string = string[4:]
    while string.startswith('''main.7.''' ):
        depth += 1
        string = string[7:]
    if string.startswith('''main.''' ):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = '''mid_block'''
    elif depth > 0 and int(layer_num ) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f'down_blocks.{depth}'
    elif depth > 0 and int(layer_num ) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - depth - 1}'
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f'up_blocks.{max_depth - 1}' if int(layer_num ) > 3 else '''down_blocks.0'''
    if not string_left.startswith('''.''' ):
        raise ValueError(f'Naming error with {input_string} and string_left: {string_left}.' )
    string_left = string_left[1:]
    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left )
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left )
        string_left = new_string_left
    if not isinstance(string_left , list ):
        new_string = prefix + '''.''' + new_layer + '''.''' + string_left
    else:
        new_string = [prefix + '''.''' + new_layer + '''.''' + s for s in string_left]
    return new_string
def rename_orig_weights( state_dict ):
    '''simple docstring'''
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith('''kernel''' ):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k )
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k , list ):
            new_state_dict = transform_conv_attns(new_state_dict , new_k , v )
        else:
            new_state_dict[new_k] = v
    return new_state_dict
def transform_conv_attns( new_state_dict , new_k , v ):
    '''simple docstring'''
    if len(new_k ) == 1:
        if len(v.shape ) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3 ):
            if len(v.shape ) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main( args ):
    '''simple docstring'''
    device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    model_name = args.model_path.split('''/''' )[-1].split('''.''' )[0]
    if not os.path.isfile(args.model_path ):
        assert (
            model_name == args.model_path
        ), f'Make sure to provide one of the official model names {MODELS_MAP.keys()}'
        args.model_path = download(model_name )
    sample_rate = MODELS_MAP[model_name]['''sample_rate''']
    sample_size = MODELS_MAP[model_name]['''sample_size''']
    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0
    diffusers_model = UNet1DModel(sample_size=sample_size , sample_rate=sample_rate )
    diffusers_state_dict = diffusers_model.state_dict()
    orig_model = DiffusionUncond(config )
    orig_model.load_state_dict(torch.load(args.model_path , map_location=device )['''state_dict'''] )
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict )
    renamed_minus_diffusers = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
    diffusers_minus_renamed = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
    assert len(renamed_minus_diffusers ) == 0, f'Problem with {renamed_minus_diffusers}'
    assert all(k.endswith('''kernel''' ) for k in list(diffusers_minus_renamed ) ), f'Problem with {diffusers_minus_renamed}'
    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f'Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}'
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict )
    steps = 100
    seed = 33
    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps )
    generator = torch.manual_seed(seed )
    noise = torch.randn([1, 2, config.sample_size] , generator=generator ).to(device )
    t = torch.linspace(1 , 0 , steps + 1 , device=device )[:-1]
    step_list = get_crash_schedule(t )
    pipe = DanceDiffusionPipeline(unet=diffusers_model , scheduler=diffusers_scheduler )
    generator = torch.manual_seed(33 )
    audio = pipe(num_inference_steps=steps , generator=generator ).audios
    generated = sampling.iplms_sample(orig_model , noise , step_list , {} )
    generated = generated.clamp(-1 , 1 )
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path )
    print('''Diff sum''' , diff_sum )
    print('''Diff max''' , diff_max )
    assert diff_max < 1E-3, f'Diff max: {diff_max} is too much :-/'
    print(f'Conversion for {model_name} successful!' )
if __name__ == "__main__":
lowercase__ :Tuple = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
lowercase__ :Dict = parser.parse_args()
main(args)
| 712
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_instructblip": [
"INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"InstructBlipConfig",
"InstructBlipQFormerConfig",
"InstructBlipVisionConfig",
],
"processing_instructblip": ["InstructBlipProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"InstructBlipQFormerModel",
"InstructBlipPreTrainedModel",
"InstructBlipForConditionalGeneration",
"InstructBlipVisionModel",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
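# Lazy-import sketch: with this pattern, importing the package merely registers the names
# listed in _import_structure; the heavy torch-backed modules are only imported on first
# attribute access through the _LazyModule proxy installed into sys.modules above.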
| 633
| 0
|
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config( self , **kwargs ):
        config = {
            '''num_train_timesteps''': 1000,
            '''variance_type''': '''fixed_small_log''',
            '''clip_sample''': True,
            '''clip_sample_range''': 1.0,
            '''prediction_type''': '''epsilon''',
        }
        config.update(**kwargs )
        return config
    def test_timesteps( self ):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_variance_type( self ):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance )
    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )
    def test_clip_sample_range( self ):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range )
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_time_indices( self ):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )
    def test_variance_fixed_small_log( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='''fixed_small_log''' )
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1E-5
    def test_variance_learned_range( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='''learned_range''' )
        scheduler = scheduler_class(**scheduler_config )
        predicted_variance = 0.5
        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.1712790 < 1E-5
        assert scheduler._get_variance(487 , predicted_variance=predicted_variance ) - -5.7998052 < 1E-5
        assert scheduler._get_variance(999 , predicted_variance=predicted_variance ) - -0.0010011 < 1E-5
def UpperCamelCase__ ( self : str ) -> Optional[Any]:
_UpperCamelCase =self.scheduler_classes[0]
_UpperCamelCase =self.get_scheduler_config()
_UpperCamelCase =scheduler_class(**A_ )
_UpperCamelCase =scheduler.timesteps
_UpperCamelCase =self.dummy_model()
_UpperCamelCase =self.dummy_sample_deter
_UpperCamelCase =torch.manual_seed(0 )
for i, t in enumerate(A_ ):
# 1. predict noise residual
_UpperCamelCase =model(A_ , A_ )
# 2. predict previous mean of sample x_t-1
_UpperCamelCase =scheduler.step(A_ , A_ , A_ , generator=A_ ).prev_sample
_UpperCamelCase =pred_prev_sample
_UpperCamelCase =torch.sum(torch.abs(A_ ) )
_UpperCamelCase =torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.3284743 ) < 1E-3
def UpperCamelCase__ ( self : Optional[int] ) -> Any:
_UpperCamelCase =self.scheduler_classes[0]
_UpperCamelCase =self.get_scheduler_config()
_UpperCamelCase =scheduler_class(**A_ )
scheduler.set_timesteps(25 )
_UpperCamelCase =scheduler.timesteps
_UpperCamelCase =self.dummy_model()
_UpperCamelCase =self.dummy_sample_deter
_UpperCamelCase =torch.manual_seed(0 )
for i, t in enumerate(A_ ):
# 1. predict noise residual
_UpperCamelCase =model(A_ , A_ )
if i + 1 == timesteps.shape[0]:
_UpperCamelCase =None
else:
_UpperCamelCase =timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_UpperCamelCase =scheduler.step(
A_ , A_ , A_ , prev_timestep=A_ , generator=A_ ).prev_sample
_UpperCamelCase =pred_prev_sample
_UpperCamelCase =torch.sum(torch.abs(A_ ) )
_UpperCamelCase =torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.3362038 ) < 1E-3
def UpperCamelCase__ ( self : Union[str, Any] ) -> Tuple:
pass
def UpperCamelCase__ ( self : Dict ) -> List[Any]:
pass
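# Added note (not in the original file): these tests are normally run with
# pytest from the diffusers repo root, e.g.:
#
#     pytest tests/schedulers/test_scheduler_unclip.py -k "full_loop"
#
# The exact path is illustrative and may differ across diffusers versions.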
| 404
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
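# Illustrative sketch (added, not part of the original script): the fused Swin
# qkv weight has shape (3 * dim, dim); the three row-blocks are the query, key
# and value projections, which is exactly what the slicing above extracts:
#
#     import numpy as np
#     dim = 96
#     fused = np.arange(3 * dim * dim, dtype=np.float32).reshape(3 * dim, dim)
#     q, k, v = fused[:dim, :], fused[dim : 2 * dim, :], fused[-dim:, :]
#     assert q.shape == k.shape == v.shape == (dim, dim)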
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        '--model_name',
        default='maskformer-swin-tiny-ade',
        type=str,
        help="Name of the MaskFormer model you'd like to convert.",
    )
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
type=str,
help='Path to the original state dict (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
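# Example invocation (added for illustration; the script filename and paths
# are placeholders, not taken from the original file):
#
#     python convert_maskformer_original_pytorch_checkpoint_to_pytorch.py \
#         --model_name maskformer-swin-tiny-ade \
#         --checkpoint_path /path/to/model.pkl \
#         --pytorch_dump_folder_path /path/to/output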
| 625
| 0
|
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger

logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum don't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
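# Minimal sketch (added, not part of the original module) of how
# verify_checksums behaves; the URL and hash are made up:
#
#     expected = {"https://example.com/a.txt": {"num_bytes": 5, "checksum": "abc"}}
#     verify_checksums(expected, expected)            # logs success
#     verify_checksums(expected, {}, "train split")   # raises ExpectedMoreDownloadedFiles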
class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file"""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size):
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 716
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 462
| 0
|
import pytest
import requests

from datasets.utils.file_utils import http_head

from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline


@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
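# Added note (not in the original file): the offline() context manager can
# also be used directly when debugging network-dependent code, e.g.:
#
#     with offline(OfflineSimulationMode.CONNECTION_FAILS):
#         ...  # any code here sees outgoing requests fail fast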
| 80
|
import random


def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
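# Usage sketch (added, not part of the original file): sorting a fixed list
# without the interactive prompt.
#
#     data = [5, 2, 9, 1, 5, 6]
#     quick_sort_random(data, 0, len(data))
#     assert data == [1, 2, 5, 5, 6, 9]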
| 61
| 0
|
def gcd(a: int, b: int) -> int:
    """Greatest common divisor via the Euclidean algorithm."""
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    """Return x such that (a * x) % m == 1, via the extended Euclidean algorithm."""
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
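# Worked example (added for illustration, not part of the original module):
# 3 * 4 = 12 ≡ 1 (mod 11), so the inverse of 3 modulo 11 is 4.
if __name__ == "__main__":
    assert mod_inverse(3, 11) == 4
    print(mod_inverse(3, 11))  # 4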
| 10
|
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """A color is valid if no already-colored neighbour uses the same color."""
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Backtracking helper: try to color vertices from `index` onwards."""
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring (one color index per vertex) or [] if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
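# Illustrative usage (added, not in the original module): a triangle needs
# three colors; adjacency matrix entries are 1 where an edge exists.
if __name__ == "__main__":
    triangle = [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
    print(color(triangle, 3))  # [0, 1, 2]
    print(color(triangle, 2))  # [] -- no valid 2-coloring exists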
| 10
| 1
|
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class MegatronBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        embedding_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MegatronBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_megatron_bert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_megatron_bert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_megatron_bert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_megatron_bert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MegatronBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_megatron_bert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_megatron_bert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MegatronBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_megatron_bert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MegatronBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MegatronBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MegatronBertModel,
            MegatronBertForMaskedLM,
            MegatronBertForCausalLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MegatronBertModel,
            "fill-mask": MegatronBertForMaskedLM,
            "question-answering": MegatronBertForQuestionAnswering,
            "text-classification": MegatronBertForSequenceClassification,
            "text-generation": MegatronBertForCausalLM,
            "token-classification": MegatronBertForTokenClassification,
            "zero-shot": MegatronBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    # test_resize_embeddings = False
    test_head_masking = False
    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MegatronBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MegatronBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_megatron_bert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs)
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class MegatronBertModelIntegrationTests(unittest.TestCase):
    @slow
    @unittest.skip("Model is not available.")
    def test_inference_no_head(self):
        directory = "nvidia/megatron-bert-uncased-345m"
        if "MYDIR" in os.environ:
            directory = os.path.join(os.environ["MYDIR"], directory)
        model = MegatronBertModel.from_pretrained(directory)
        model.to(torch_device)
        model.half()
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 1024))
        self.assertEqual(output.shape, expected_shape)
        expected = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3):
            for jj in range(3):
                a = output[0, ii, jj]
                b = expected[3 * ii + jj]
                msg = "ii={} jj={} a={} b={}".format(ii, jj, a, b)
                self.assertTrue(math.isclose(a, b, rel_tol=TOLERANCE, abs_tol=TOLERANCE), msg=msg)
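# Added note (not in the original file): these tests run with pytest from the
# transformers repo root, e.g.:
#
#     pytest tests/models/megatron_bert/test_modeling_megatron_bert.py
#
# The integration test stays skipped unless a converted checkpoint is
# available locally (see the MYDIR environment variable handling above).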
| 97
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    r"""
    Construct a DPRContextEncoder tokenizer, identical to [`BertTokenizer`]: punctuation splitting and wordpiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    r"""
    Construct a DPRQuestionEncoder tokenizer, identical to [`BertTokenizer`]: punctuation splitting and wordpiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
              of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the first
              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
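
# A minimal, self-contained sketch of the greedy non-overlapping span
# selection used by `_get_best_spans` above, run on toy logits. The helper
# name `pick_best_spans` is illustrative, not part of the tokenizer API.
def pick_best_spans(start_logits, end_logits, max_answer_length=3, top_spans=2):
    scores = []
    for start, s_score in enumerate(start_logits):
        for length, e_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), s_score + e_score))
    scores.sort(key=lambda x: x[1], reverse=True)
    chosen = []
    for (start, end), _ in scores:
        # skip candidates overlapping an already chosen (higher-scoring) span
        if any(not (end < s or start > e) for s, e in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen


print(pick_best_spans([0.1, 2.0, 0.3, 0.1], [0.2, 0.1, 1.5, 0.4]))  # [(1, 2), (3, 3)]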
| 16
| 0
|
"""simple docstring"""
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None


def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions, return_tensors="pt", padding=True, truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )
        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str,
        help="RAG model retriever type",
    )
    parser.add_argument("--index_path", default=None, type=str, help="Path to the retrieval index")
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"],
        help=(
            "Format of the gold data file. "
            "qa - a single line in the following format: question [tab] answer_list. "
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number",
    )
    parser.add_argument("--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true",
    )
    parser.add_argument("--num_beams", default=4, type=int, help="Number of beams to be used when generating answers")
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument("--print_predictions", action="store_true", help="If True, prints predictions while evaluating.")
    parser.add_argument("--print_docs", action="store_true", help="If True, prints docs retrieved while generating.")
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
args = get_args()
main(args)
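
# Toy illustration (not part of the script) of the two metrics above:
# exact match maximised over several ground truths, and precision@k over
# tab-separated provenance strings.
def exact_match(prediction, ground_truth):
    return float(prediction.strip().lower() == ground_truth.strip().lower())


preds = ["Paris", "1969"]
golds = [["paris", "Paris, France"], ["1968"]]
em = sum(max(exact_match(p, gt) for gt in gts) for p, gts in zip(preds, golds)) / len(preds)
print(f"EM: {100.0 * em:.2f}")  # 50.00

hypo, reference, k = "TitleA\tTitleB\tTitleC", "TitleB\tTitleD", 2
print(len(set(hypo.split("\t")[:k]) & set(reference.split("\t"))) / k)  # 0.5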
| 720
|
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict (e.g. pytorch_model.bin) to fp16, in place or to save_path."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
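
# Quick round-trip sanity check for the converter above (file names are
# arbitrary; assumes `convert` from this module is in scope).
sd = {"weight": torch.randn(4, 4), "bias": torch.zeros(4)}
torch.save(sd, "tiny_model.bin")
convert("tiny_model.bin", save_path="tiny_model_fp16.bin")
reloaded = torch.load("tiny_model_fp16.bin")
assert all(v.dtype == torch.float16 for v in reloaded.values())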
| 554
| 0
|
"""simple docstring"""
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[str]:
'''simple docstring'''
if issubclass(_lowerCamelCase , _lowerCamelCase ):
_lowerCamelCase : Tuple = parquet_path
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
_lowerCamelCase : Any = [parquet_path]
_lowerCamelCase : Any = tmp_path / "cache"
_lowerCamelCase : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_lowerCamelCase : Union[str, Any] = ParquetDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_parquet_dataset(_lowerCamelCase , _lowerCamelCase )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=("train",) ) -> Dict:
'''simple docstring'''
assert isinstance(_lowerCamelCase , _lowerCamelCase )
for split in splits:
_lowerCamelCase : List[str] = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) -> List[Any]:
'''simple docstring'''
_lowerCamelCase : Any = tmp_path / "cache"
_lowerCamelCase : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_lowerCamelCase : Any = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read()
_check_parquet_datasetdict(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
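
# Outside the test harness, the same round trip is two calls on the public
# `datasets` API; a minimal sketch with throwaway column names.
from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4]})
ds.to_parquet("tiny.parquet")
reloaded = Dataset.from_parquet("tiny.parquet")
assert reloaded.column_names == ds.column_names
assert reloaded.num_rows == ds.num_rows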
| 46
|
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''', default=None, type=str, help='''Which huggingface architecture to use: bart-large-xsum'''
)
args = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
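
# Standalone illustration of the weight tying done by `make_linear_from_emb`
# above: the projection layer ends up sharing storage with the embedding matrix.
import torch
from torch import nn

emb = nn.Embedding(10, 4)
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(vocab_size, emb_size, bias=False)
lm_head.weight.data = emb.weight.data  # replaces the tensor, sharing storage

emb.weight.data[0] = 1.0
assert torch.equal(lm_head.weight[0], emb.weight[0])  # change visible in both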
| 287
| 0
|
from __future__ import annotations
def simple_interest(principal: float, daily_interest_rate: float, days_between_payments: float) -> float:
    """Interest accrued linearly: principal * rate_per_day * days."""
    if days_between_payments <= 0:
        raise ValueError("days_between_payments must be > 0")
    if daily_interest_rate < 0:
        raise ValueError("daily_interest_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * daily_interest_rate * days_between_payments


def compound_interest(
    principal: float,
    nominal_annual_interest_rate_percentage: float,
    number_of_compounding_periods: float,
) -> float:
    """Interest accrued geometrically: principal * ((1 + rate) ** periods - 1)."""
    if number_of_compounding_periods <= 0:
        raise ValueError("number_of_compounding_periods must be > 0")
    if nominal_annual_interest_rate_percentage < 0:
        raise ValueError("nominal_annual_interest_rate_percentage must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return principal * (
        (1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
        - 1
    )


def apr_interest(
    principal: float,
    nominal_annual_percentage_rate: float,
    number_of_years: float,
) -> float:
    """APR interest: the annual rate compounded daily over whole years."""
    if number_of_years <= 0:
        raise ValueError("number_of_years must be > 0")
    if nominal_annual_percentage_rate < 0:
        raise ValueError("nominal_annual_percentage_rate must be >= 0")
    if principal <= 0:
        raise ValueError("principal must be > 0")
    return compound_interest(
        principal, nominal_annual_percentage_rate / 365, number_of_years * 365
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
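
    # Worked example for the three helpers above, with a 1000 principal.
    print(simple_interest(1000, 0.0005, 60))  # 30.0 -> 0.05% per day for 60 days
    print(compound_interest(1000, 0.05, 3))   # ~157.63 -> 5% per period, 3 periods
    print(apr_interest(1000, 0.05, 1))        # ~51.27 -> 5% APR compounded daily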
| 710
|
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 232
| 0
|
'''simple docstring'''
import string
def a__ ( lowerCAmelCase__ ) -> None:
for key in range(len(string.ascii_uppercase ) ):
UpperCAmelCase__ : Tuple = ''''''
for symbol in message:
if symbol in string.ascii_uppercase:
UpperCAmelCase__ : Optional[Any] = string.ascii_uppercase.find(lowerCAmelCase__ )
UpperCAmelCase__ : List[str] = num - key
if num < 0:
UpperCAmelCase__ : Optional[Any] = num + len(string.ascii_uppercase )
UpperCAmelCase__ : Optional[int] = translated + string.ascii_uppercase[num]
else:
UpperCAmelCase__ : Optional[Any] = translated + symbol
print(F"""Decryption using Key #{key}: {translated}""" )
def a__ ( ) -> None:
UpperCAmelCase__ : Union[str, Any] = input('''Encrypted message: ''' )
UpperCAmelCase__ : List[str] = message.upper()
decrypt(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
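
# Quick check (sketch): encrypt with a known shift, then brute-force with
# decrypt() above; the line for Key #3 should read HELLO.
def caesar_encrypt(message: str, key: int) -> str:
    up = string.ascii_uppercase
    return "".join(up[(up.find(c) + key) % 26] if c in up else c for c in message.upper())


decrypt(caesar_encrypt("HELLO", 3))  # prints all 26 candidate decryptions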
| 75
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_albert'] = ['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_albert_fast'] = ['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_albert'] = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_albert'] = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_flax_albert'] = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 421
| 0
|
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    def put(self, value):
        """Called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        print(text, flush=True, end="" if not stream_end else None)

    def _is_chinese_char(self, cp):
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x20000 and cp <= 0x2A6DF)
            or (cp >= 0x2A700 and cp <= 0x2B73F)
            or (cp >= 0x2B740 and cp <= 0x2B81F)
            or (cp >= 0x2B820 and cp <= 0x2CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2F800 and cp <= 0x2FA1F)
        ):
            return True
        return False


class TextIteratorStreamer(TextStreamer):
    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
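
# Sketch of the canonical usage: run generate() in a worker thread and consume
# decoded text on the main thread. The model name "gpt2" is only an example.
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
streamer = TextIteratorStreamer(tok, skip_prompt=True)

thread = Thread(target=model.generate, kwargs=dict(inputs, streamer=streamer, max_new_tokens=20))
thread.start()
for new_text in streamer:
    print(new_text, end="")
thread.join()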
| 715
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    """Count single characters and adjacent two-character sequences in the text."""
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main() -> None:
    import doctest

    doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
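
# The quantity printed above is the Shannon entropy H = -sum(p * log2 p);
# a tiny standalone check on a string with two equally likely symbols.
import math
from collections import Counter

sample = "abab"
counts = Counter(sample)
total = sum(counts.values())
entropy = -sum((n / total) * math.log2(n / total) for n in counts.values())
print(f"{entropy:.1f}")  # 1.0 bit per symbol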
| 96
| 0
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
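
# Self-contained imitation (not the diffusers API itself) of the branch above
# that pops a deprecated keyword argument and warns about it.
import warnings


def pop_deprecated(kwargs, name, version_name, message):
    if name in kwargs:
        warnings.warn(
            f"The `{name}` argument is deprecated and will be removed in version {version_name}. " + message,
            FutureWarning,
            stacklevel=2,
        )
        return kwargs.pop(name)
    return None


demo_kwargs = {"scale": 2.0}
scale = pop_deprecated(demo_kwargs, "scale", "1.0.0", "Use `guidance_scale` instead.")
print(scale, demo_kwargs)  # 2.0 {}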
| 157
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_swiftformer'] = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 157
| 1
|
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 612
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''', '''False''' ) ) is not True, reason='''Skipping test because should only be run when releasing minor transformers version''', )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator and run training
        estimator = self.create_estimator()
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 612
| 1
|
'''simple docstring'''
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)


class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self):
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch
        return decrypted


def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
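
# Round trip with a small key (sketch; relies on the class above). The
# determinant of [[2, 5], [1, 6]] is 7, which is coprime with 36.
hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
ciphertext = hc.encrypt("HELLO")
print(ciphertext)
print(hc.decrypt(ciphertext))  # HELLOO: the input padded to a multiple of 2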
| 683
|
from manim import *
class lowercase_ ( __snake_case ):
def UpperCamelCase ( self ):
_snake_case : Tuple = Rectangle(height=0.5 , width=0.5 )
_snake_case : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_snake_case : List[str] = [mem.copy() for i in range(6 )]
_snake_case : Any = [mem.copy() for i in range(6 )]
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : str = VGroup(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : int = Text("CPU" , font_size=24 )
_snake_case : str = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowercase_ )
_snake_case : int = [mem.copy() for i in range(4 )]
_snake_case : Dict = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : str = Text("GPU" , font_size=24 )
_snake_case : Optional[int] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
gpu.move_to([-1, -1, 0] )
self.add(lowercase_ )
_snake_case : Any = [mem.copy() for i in range(6 )]
_snake_case : Any = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Dict = Text("Model" , font_size=24 )
_snake_case : Dict = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , buff=0.5 , aligned_edge=lowercase_ )
model.move_to([3, -1.0, 0] )
self.add(lowercase_ )
_snake_case : str = []
for i, rect in enumerate(lowercase_ ):
rect.set_stroke(lowercase_ )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_snake_case : Union[str, Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(lowercase_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowercase_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=lowercase_ , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=lowercase_ , buff=0.0 )
self.add(lowercase_ )
cpu_targs.append(lowercase_ )
_snake_case : List[Any] = [mem.copy() for i in range(6 )]
_snake_case : Union[str, Any] = VGroup(*lowercase_ ).arrange(lowercase_ , buff=0 )
_snake_case : Optional[Any] = Text("Loaded Checkpoint" , font_size=24 )
_snake_case : Union[str, Any] = Group(lowercase_ , lowercase_ ).arrange(lowercase_ , aligned_edge=lowercase_ , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_snake_case : Optional[int] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_snake_case : Optional[Any] = MarkupText(
f"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowercase_ , lowercase_ )
_snake_case : Union[str, Any] = MarkupText(
f"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , )
blue_text.next_to(lowercase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_snake_case : List[Any] = MarkupText(
f"""Next, a <i><span fgcolor=\"{BLUE}\">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor=\"{BLUE}\">single shard</span>.""" , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(lowercase_ ) , Write(lowercase_ ) )
self.play(Write(lowercase_ , run_time=1 ) , Create(lowercase_ , run_time=1 ) )
_snake_case : int = []
_snake_case : str = []
for i, rect in enumerate(lowercase_ ):
_snake_case : Dict = fill.copy().set_fill(lowercase_ , opacity=0.7 )
target.move_to(lowercase_ )
first_animations.append(GrowFromCenter(lowercase_ , run_time=1 ) )
_snake_case : Dict = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(lowercase_ , run_time=1.5 ) )
self.play(*lowercase_ )
self.play(*lowercase_ )
self.wait()
| 670
| 0
|
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)
print(
'No of Comparisons for 100 elements selected from a standard normal distribution'
'is :'
)
print(z)
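
# Sketch: sort a tiny array with the functions above; the comparison count
# varies with the random pivot choices.
arr = np.array([3.0, 1.0, 2.0])
comparisons = _in_place_quick_sort(arr, 0, len(arr) - 1)
print(arr)          # [1. 2. 3.]
print(comparisons)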
| 705
|
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class _lowerCAmelCase :
"""simple docstring"""
    def __init__(
        self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16,
        depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True,
        hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1,
        hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02,
        layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True,
        type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        """simple docstring"""
        return MaskFormerSwinConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels,
            embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads,
            window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride, out_features=self.out_features,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        """simple docstring"""
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _lowerCAmelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def __A ( self : Union[str, Any] ) -> str:
"""simple docstring"""
pass
    def test_config(self):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        """simple docstring"""
        return
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
@unittest.skip("Swin does not use inputs_embeds" )
def __A ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip("Swin does not support feedforward chunking" )
def __A ( self : Dict ) -> int:
"""simple docstring"""
pass
def __A ( self : Dict ) -> int:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) )
def __A ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(SCREAMING_SNAKE_CASE )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def __A ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def __A ( self : Optional[int] ) -> Any:
"""simple docstring"""
pass
def __A ( self : Any , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Optional[int] ) -> Dict:
"""simple docstring"""
lowerCAmelCase = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
lowerCAmelCase = outputs.hidden_states
lowerCAmelCase = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
# Swin has a different seq_length
lowerCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __A ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCAmelCase = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __A ( self : Dict ) -> Tuple:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCAmelCase = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def __A ( self : List[Any] ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def __A ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def __A ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def __A ( self : str ) -> List[str]:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(t):
            t[t != t] = 0  # NaN != NaN, so only the NaN entries are zeroed
            return t
def check_equivalence(SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple={} ):
with torch.no_grad():
lowerCAmelCase = model(**SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
lowerCAmelCase = model(**SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ).to_tuple()
def recursive_check(SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
if isinstance(SCREAMING_SNAKE_CASE , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
recursive_check(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE ) , atol=1E-5 ) , msg=(
"Tuple and dict output are not equal. Difference:"
f" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
f" {torch.isnan(SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(SCREAMING_SNAKE_CASE )}. Dict has"
f" `nan`: {torch.isnan(SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(SCREAMING_SNAKE_CASE )}."
) , )
recursive_check(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
check_equivalence(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
check_equivalence(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowerCAmelCase = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
check_equivalence(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , {"output_hidden_states": True} )
lowerCAmelCase = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
lowerCAmelCase = self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
check_equivalence(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , {"output_hidden_states": True} )
@require_torch
class _lowerCAmelCase ( unittest.TestCase , UpperCamelCase_ ):
"""simple docstring"""
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        """simple docstring"""
        self.model_tester = MaskFormerSwinModelTester(self)
def __A ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
lowerCAmelCase = backbone_class(SCREAMING_SNAKE_CASE )
backbone.to(SCREAMING_SNAKE_CASE )
backbone.eval()
lowerCAmelCase = backbone(**SCREAMING_SNAKE_CASE )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , SCREAMING_SNAKE_CASE )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowerCAmelCase = backbone(**SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
                h_batch_size, _, h_n_channels = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowerCAmelCase = backbone(**SCREAMING_SNAKE_CASE , output_attentions=SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.attentions )
| 159
| 0
|
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
    print("Fisher-Yates Shuffle:")
    print("List", integers, strings)
    print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 45
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_cpmant': ['CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CpmAntConfig'],
'tokenization_cpmant': ['CpmAntTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST',
'CpmAntForCausalLM',
'CpmAntModel',
'CpmAntPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
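# The pattern above defers heavy imports until an attribute is first accessed.
# A minimal sketch of the same idea (simplified; the real `_LazyModule` in
# transformers also handles submodules and optional-dependency errors):
import importlib
import types


class MinimalLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # Import the owning submodule only on first access, then delegate.
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)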
| 296
| 0
|
ERROR_MSG = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex
    if letter.isdigit():
        raise ValueError(ERROR_MSG)
    return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
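# Illustrative checks (hypothetical IDs; the letter is LOOKUP_LETTERS[number % 23]):
assert is_spain_national_id("12345678Z")   # 12345678 % 23 == 14 -> "Z"
assert is_spain_national_id("12345678-Z")  # dashes are stripped before validation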
| 392
|
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""")
parser.add_argument(
"""--output_dir""",
type=str,
required=True,
help="""Where to store the downloaded artifacts and other result files.""",
)
parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""")
# optional parameters
parser.add_argument(
"""--targets""",
default="""DeprecationWarning,UserWarning,FutureWarning""",
type=list_str,
help="""Comma-separated list of target warning(s) which we want to extract.""",
)
parser.add_argument(
"""--from_gh""",
action="""store_true""",
help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""",
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
        print("=" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
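# Hypothetical invocation (script name, run id, and token are placeholders):
#   python extract_warnings.py --workflow_run_id 123456789 \
#       --output_dir ./artifacts --token $GITHUB_TOKEN \
#       --targets DeprecationWarning,UserWarning,FutureWarning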
| 392
| 1
|
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint and rebuild the original fairseq-style model
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
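# Hypothetical invocation (script name and paths are placeholders):
#   python convert_wavlm_original_checkpoint.py \
#       --checkpoint_path ./WavLM-Base.pt --pytorch_dump_folder_path ./wavlm-base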
| 138
|
def or_gate(input_1: int, input_2: int) -> int:
    """Calculate OR of the input values: 1 if at least one input is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    """Tests the or_gate function."""
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
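# The tuple-count trick generalizes to any number of inputs; a hedged sketch
# (the name or_gate_n is chosen for illustration):
def or_gate_n(*inputs: int) -> int:
    """n-input OR: 1 if any input is 1, else 0."""
    return int(any(inputs))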
| 87
| 0
|
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago", appid: str = APPID) -> dict:
    """Current weather via the `weather` endpoint (locals() doubles as the query params)."""
    return requests.get(URL_BASE + 'weather', params=locals()).json()


def weather_forecast(q: str = "Kolkata, India", appid: str = APPID) -> dict:
    """Multi-day forecast via the `forecast` endpoint."""
    return requests.get(URL_BASE + 'forecast', params=locals()).json()


def weather_onecall(lat: float = 55.68, lon: float = 12.57, appid: str = APPID) -> dict:
    """Combined weather data for coordinates via the `onecall` endpoint."""
    return requests.get(URL_BASE + 'onecall', params=locals()).json()
if __name__ == "__main__":
from pprint import pprint
while True:
        location = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
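# Hypothetical session (requires a valid APPID; field names follow the
# OpenWeatherMap response schema, e.g. temperatures are in Kelvin by default):
#   >>> data = current_weather("Chicago")
#   >>> data["main"]["temp"]               # e.g. 287.5
#   >>> data["weather"][0]["description"]  # e.g. 'light rain'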
| 3
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            f''' {value.shape} for {full_name}''' )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def A ( lowercase , lowercase , lowercase ) -> Any:
'''simple docstring'''
UpperCamelCase = []
UpperCamelCase = fairseq_model.state_dict()
UpperCamelCase = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
lowercase , lowercase , lowercase , lowercase , hf_model.config.feat_extract_norm == 'group' , )
UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
UpperCamelCase = True
if "*" in mapped_key:
UpperCamelCase = name.split(lowercase )[0].split('.' )[-2]
UpperCamelCase = mapped_key.replace('*' , lowercase )
if "pos_bias_u" in name:
UpperCamelCase = None
elif "pos_bias_v" in name:
UpperCamelCase = None
elif "weight_g" in name:
UpperCamelCase = 'weight_g'
elif "weight_v" in name:
UpperCamelCase = 'weight_v'
elif "bias" in name:
UpperCamelCase = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase = 'weight'
elif "running_mean" in name:
UpperCamelCase = 'running_mean'
elif "inv_freq" in name:
UpperCamelCase = 'inv_freq'
elif "running_var" in name:
UpperCamelCase = 'running_var'
elif "num_batches_tracked" in name:
UpperCamelCase = 'num_batches_tracked'
else:
UpperCamelCase = None
set_recursively(lowercase , lowercase , lowercase , lowercase , lowercase )
continue
if not is_used:
unused_weights.append(lowercase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def A ( lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = full_name.split('conv_layers.' )[-1]
UpperCamelCase = name.split('.' )
UpperCamelCase = int(items[0] )
UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
UpperCamelCase = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowercase )
@torch.no_grad()
def A ( lowercase , lowercase , lowercase=None , lowercase=None , lowercase=True ) -> int:
'''simple docstring'''
if config_path is not None:
UpperCamelCase = WavaVecaConformerConfig.from_pretrained(lowercase , hidden_act='swish' )
else:
UpperCamelCase = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
UpperCamelCase = 'rotary'
if is_finetuned:
if dict_path:
UpperCamelCase = Dictionary.load(lowercase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase = target_dict.pad_index
UpperCamelCase = target_dict.bos_index
UpperCamelCase = target_dict.eos_index
UpperCamelCase = len(target_dict.symbols )
UpperCamelCase = os.path.join(lowercase , 'vocab.json' )
if not os.path.isdir(lowercase ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowercase ) )
return
os.makedirs(lowercase , exist_ok=lowercase )
UpperCamelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCamelCase = 0
UpperCamelCase = 1
with open(lowercase , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(lowercase , lowercase )
UpperCamelCase = WavaVecaCTCTokenizer(
lowercase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowercase , )
UpperCamelCase = True if config.feat_extract_norm == 'layer' else False
UpperCamelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=lowercase , return_attention_mask=lowercase , )
UpperCamelCase = WavaVecaProcessor(feature_extractor=lowercase , tokenizer=lowercase )
processor.save_pretrained(lowercase )
UpperCamelCase = WavaVecaConformerForCTC(lowercase )
else:
UpperCamelCase = WavaVecaConformerForPreTraining(lowercase )
if is_finetuned:
UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
UpperCamelCase = argparse.Namespace(task='audio_pretraining' )
UpperCamelCase = fairseq.tasks.setup_task(lowercase )
UpperCamelCase , UpperCamelCase , UpperCamelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase )
UpperCamelCase = model[0].eval()
recursively_load_weights(lowercase , lowercase , not is_finetuned )
hf_wavavec.save_pretrained(lowercase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
    args = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 3
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    model_type = 'vit'

    def __init__(
        self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3,
        qkv_bias=True, encoder_stride=16, **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
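# Hypothetical quick look at the export spec defined above (class names as in this file):
config = ViTConfig()
onnx_config = ViTOnnxConfig(config)
print(onnx_config.inputs)               # pixel_values with dynamic batch/channel/spatial axes
print(onnx_config.atol_for_validation)  # 1e-4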
| 13
|
'''simple docstring'''
def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(f'''{solution() = }''')
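# Brute-force cross-check of the closed forms for a small n (Project Euler 6):
n = 10
assert solution(n) == sum(range(1, n + 1)) ** 2 - sum(i * i for i in range(1, n + 1))  # 2640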
| 13
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
        initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2,
        position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> "OrderedDict":
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
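# Hypothetical quick check of the ONNX input spec (class names as in this file):
config = XLMRobertaConfig()  # defaults mirror the base checkpoint
onnx_config = XLMRobertaOnnxConfig(config, task="multiple-choice")
print(onnx_config.inputs)  # input_ids / attention_mask with batch, choice, sequence axes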
| 718
|
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
a : Any = logging.get_logger(__name__)
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[Any] = question_encoder
UpperCAmelCase : Tuple = generator
UpperCAmelCase : int = self.question_encoder
def A_ ( self , snake_case ):
'''simple docstring'''
if os.path.isfile(snake_case ):
raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file" )
os.makedirs(snake_case , exist_ok=snake_case )
UpperCAmelCase : Union[str, Any] = os.path.join(snake_case , "question_encoder_tokenizer" )
UpperCAmelCase : Dict = os.path.join(snake_case , "generator_tokenizer" )
self.question_encoder.save_pretrained(snake_case )
self.generator.save_pretrained(snake_case )
@classmethod
def A_ ( cls , snake_case , **snake_case ):
'''simple docstring'''
from ..auto.tokenization_auto import AutoTokenizer
UpperCAmelCase : Dict = kwargs.pop("config" , snake_case )
if config is None:
UpperCAmelCase : int = RagConfig.from_pretrained(snake_case )
UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(
snake_case , config=config.question_encoder , subfolder="question_encoder_tokenizer" )
UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
snake_case , config=config.generator , subfolder="generator_tokenizer" )
return cls(question_encoder=snake_case , generator=snake_case )
def __call__( self , *snake_case , **snake_case ):
'''simple docstring'''
return self.current_tokenizer(*snake_case , **snake_case )
def A_ ( self , *snake_case , **snake_case ):
'''simple docstring'''
return self.generator.batch_decode(*snake_case , **snake_case )
def A_ ( self , *snake_case , **snake_case ):
'''simple docstring'''
return self.generator.decode(*snake_case , **snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.question_encoder
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = self.generator
def A_ ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = "longest" , snake_case = None , snake_case = True , **snake_case , ):
'''simple docstring'''
warnings.warn(
"`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
"regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
"context manager to prepare your targets. See the documentation of your specific tokenizer for more "
"details" , snake_case , )
if max_length is None:
UpperCAmelCase : Dict = self.current_tokenizer.model_max_length
UpperCAmelCase : List[str] = self(
snake_case , add_special_tokens=snake_case , return_tensors=snake_case , max_length=snake_case , padding=snake_case , truncation=snake_case , **snake_case , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
UpperCAmelCase : Optional[int] = self.current_tokenizer.model_max_length
UpperCAmelCase : Union[str, Any] = self(
text_target=snake_case , add_special_tokens=snake_case , return_tensors=snake_case , padding=snake_case , max_length=snake_case , truncation=snake_case , **snake_case , )
UpperCAmelCase : Optional[Any] = labels["input_ids"]
return model_inputs
| 609
| 0
|
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class TokenizedDataset(IterableDataset):
    '''Tokenize and preprocess the dataset, yielding each prompt `n_copies` times.'''

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors='''pt''')
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    '''Custom `StoppingCriteria` which checks if all generated functions in the batch are completed.'''

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        '''Returns True if all generated sequences contain any of the end-of-function strings.'''
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the code containing EOF_STRINGS."""
    string_list = re.split('''(%s)''' % '''|'''.join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate codes for each task in the dataset, batch_size samples at a time."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch['''ids'''].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch['''ids'''][:, : batch['''input_len''']], num_return_sequences=batch_size, **gen_kwargs)
            # each task is generated batch_size times
            generated_tasks = batch['''task_id'''].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
"""simple docstring"""
UpperCAmelCase = HfArgumentParser(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
UpperCAmelCase = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
UpperCAmelCase = '''false'''
if args.num_workers is None:
UpperCAmelCase = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
UpperCAmelCase = Accelerator()
set_seed(args.seed , device_specific=SCREAMING_SNAKE_CASE_ )
# Load model and tokenizer
UpperCAmelCase = AutoTokenizer.from_pretrained(args.model_ckpt )
UpperCAmelCase = tokenizer.eos_token
UpperCAmelCase = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
UpperCAmelCase = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )] ),
}
# Load evaluation dataset and metric
UpperCAmelCase = load_dataset('''openai_humaneval''' )
UpperCAmelCase = load_metric('''code_eval''' )
UpperCAmelCase = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
UpperCAmelCase = args.n_samples // args.batch_size
UpperCAmelCase = TokenizedDataset(SCREAMING_SNAKE_CASE_ , human_eval['''test'''] , n_copies=SCREAMING_SNAKE_CASE_ , n_tasks=SCREAMING_SNAKE_CASE_ )
# do not confuse args.batch_size, which is actually the num_return_sequences
UpperCAmelCase = DataLoader(SCREAMING_SNAKE_CASE_ , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
UpperCAmelCase = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
UpperCAmelCase, UpperCAmelCase = accelerator.prepare(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCAmelCase = complete_code(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , n_tasks=SCREAMING_SNAKE_CASE_ , batch_size=args.batch_size , **SCREAMING_SNAKE_CASE_ , )
if accelerator.is_main_process:
UpperCAmelCase = []
for task in tqdm(range(SCREAMING_SNAKE_CASE_ ) ):
UpperCAmelCase = human_eval['''test'''][task]['''test''']
UpperCAmelCase = f"check({human_eval['test'][task]['entry_point']})"
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
UpperCAmelCase, UpperCAmelCase = code_eval_metric.compute(
references=SCREAMING_SNAKE_CASE_ , predictions=SCREAMING_SNAKE_CASE_ , num_workers=args.num_workers )
print(f"Results: {pass_at_k}" )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
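# For reference, pass@k (as reported by the `code_eval` metric) is the standard
# unbiased estimator from the Codex paper: with n samples per task of which c
# pass the tests, pass@k = 1 - C(n - c, k) / C(n, k), averaged over tasks.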
| 51
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowercase ( _lowercase ):
lowerCamelCase : List[Any] = ["image_processor", "tokenizer"]
lowerCamelCase : List[str] = "LayoutLMv2ImageProcessor"
lowerCamelCase : Dict = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__(self , A=None , A=None , **A ):
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , A , )
lowerCamelCase_ : Any = kwargs.pop('''feature_extractor''' )
lowerCamelCase_ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(A , A )
def __call__(self , A , A = None , A = None , A = None , A = None , A = True , A = False , A = None , A = None , A = 0 , A = None , A = None , A = None , A = False , A = False , A = False , A = False , A = True , A = None , **A , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
lowerCamelCase_ : Union[str, Any] = self.image_processor(images=A , return_tensors=A )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(A , A ):
lowerCamelCase_ : str = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCamelCase_ : Tuple = features['''words''']
lowerCamelCase_ : Any = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=A , add_special_tokens=A , padding=A , truncation=A , max_length=A , stride=A , pad_to_multiple_of=A , return_token_type_ids=A , return_attention_mask=A , return_overflowing_tokens=A , return_special_tokens_mask=A , return_offsets_mapping=A , return_length=A , verbose=A , return_tensors=A , **A , )
# add pixel values
lowerCamelCase_ : Dict = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
lowerCamelCase_ : Union[str, Any] = self.get_overflowing_images(A , encoded_inputs['''overflow_to_sample_mapping'''] )
lowerCamelCase_ : List[str] = images
return encoded_inputs
def UpperCAmelCase__ (self , A , A ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
lowerCamelCase_ : Union[str, Any] = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(A ) != len(A ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
F""" {len(A )} and {len(A )}""" )
return images_with_overflow
def UpperCAmelCase__ (self , *A , **A ):
return self.tokenizer.batch_decode(*A , **A )
def UpperCAmelCase__ (self , *A , **A ):
return self.tokenizer.decode(*A , **A )
@property
def UpperCAmelCase__ (self ):
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def UpperCAmelCase__ (self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , A , )
return self.image_processor_class
@property
def UpperCAmelCase__ (self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , A , )
return self.image_processor
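A minimal usage sketch for a processor of this shape, assuming the LayoutXLMProcessor class and the microsoft/layoutxlm-base checkpoint (both illustrative here); when the image processor runs OCR (apply_ocr=True, the default), words and boxes come from the image and must not be passed in by hand:

from PIL import Image

from transformers import LayoutXLMProcessor

processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
image = Image.open("document.png").convert("RGB")

# with apply_ocr=True the processor extracts words and boxes itself
encoding = processor(image, return_tensors="pt")
print(encoding.keys())  # includes input_ids, bbox, attention_mask and image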
| 422
| 0
|
"""simple docstring"""
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class __lowerCAmelCase ( __a ):
lowercase = ['''image_processor''', '''tokenizer''']
lowercase = '''BlipImageProcessor'''
lowercase = '''AutoTokenizer'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
super().__init__(snake_case__ , snake_case__ )
# add QFormer tokenizer
__UpperCamelCase = qformer_tokenizer
def __call__( self , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = False , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 0 , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = False , __UpperCAmelCase = True , __UpperCAmelCase = None , **__UpperCAmelCase , ):
'''simple docstring'''
if images is None and text is None:
raise ValueError('You have to specify at least images or text.' )
__UpperCamelCase = BatchFeature()
if text is not None:
__UpperCamelCase = self.tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
encoding.update(snake_case__ )
__UpperCamelCase = self.qformer_tokenizer(
text=snake_case__ , add_special_tokens=snake_case__ , padding=snake_case__ , truncation=snake_case__ , max_length=snake_case__ , stride=snake_case__ , pad_to_multiple_of=snake_case__ , return_attention_mask=snake_case__ , return_overflowing_tokens=snake_case__ , return_special_tokens_mask=snake_case__ , return_offsets_mapping=snake_case__ , return_token_type_ids=snake_case__ , return_length=snake_case__ , verbose=snake_case__ , return_tensors=snake_case__ , **snake_case__ , )
__UpperCamelCase = qformer_text_encoding.pop('input_ids' )
__UpperCamelCase = qformer_text_encoding.pop('attention_mask' )
if images is not None:
__UpperCamelCase = self.image_processor(snake_case__ , return_tensors=snake_case__ )
encoding.update(snake_case__ )
return encoding
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def UpperCAmelCase ( self , *__UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.tokenizer.model_input_names
__UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def UpperCAmelCase ( self , __UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
if os.path.isfile(snake_case__ ):
raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
__UpperCamelCase = os.path.join(snake_case__ , 'qformer_tokenizer' )
self.qformer_tokenizer.save_pretrained(snake_case__ )
return super().save_pretrained(snake_case__ , **snake_case__ )
@classmethod
def UpperCAmelCase ( cls , __UpperCAmelCase , **__UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = AutoTokenizer.from_pretrained(snake_case__ , subfolder='qformer_tokenizer' )
__UpperCamelCase = cls._get_arguments_from_pretrained(snake_case__ , **snake_case__ )
args.append(snake_case__ )
return cls(*snake_case__ )
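Because this processor carries a second tokenizer, its save/load round trip nests the Q-Former tokenizer in a subfolder, as the two overrides above show. A usage sketch, assuming the InstructBlipProcessor class and an illustrative checkpoint name:

from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")

# save_pretrained writes the usual files plus a 'qformer_tokenizer' subfolder,
# which from_pretrained reloads via subfolder="qformer_tokenizer"
processor.save_pretrained("./instructblip-processor")
reloaded = InstructBlipProcessor.from_pretrained("./instructblip-processor")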
| 706
|
"""simple docstring"""
def nor_gate( input_a :int , input_b :int ) -> int:
return int(input_a == input_b == 0 )
def main( ) -> None:
print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(f'| 0 | 0 | {nor_gate(0 , 0 )} |' )
print(f'| 0 | 1 | {nor_gate(0 , 1 )} |' )
print(f'| 1 | 0 | {nor_gate(1 , 0 )} |' )
print(f'| 1 | 1 | {nor_gate(1 , 1 )} |' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
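NOR is functionally complete, so the other basic gates fall out of the function above. A small sketch, assuming the two-argument nor_gate defined above:

def not_gate(a: int) -> int:
    return nor_gate(a, a)  # NOT(a) == NOR(a, a)

def or_gate(a: int, b: int) -> int:
    return not_gate(nor_gate(a, b))  # OR == NOT(NOR)

def and_gate(a: int, b: int) -> int:
    return nor_gate(not_gate(a), not_gate(b))  # AND(a, b) == NOR(NOT a, NOT b)

assert [and_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 0, 0, 1]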
| 293
| 0
|
import argparse
import json
import subprocess
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = []
SCREAMING_SNAKE_CASE_ : Optional[int] = (
F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
' https://api.github.com/repos/huggingface/transformers/actions/runners'
)
SCREAMING_SNAKE_CASE_ : str = subprocess.run(lowerCamelCase_ , shell=lowerCamelCase_ , stdout=subprocess.PIPE )
SCREAMING_SNAKE_CASE_ : Any = output.stdout.decode('utf-8' )
SCREAMING_SNAKE_CASE_ : Optional[Any] = json.loads(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] = status['runners']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(lowerCamelCase_ )
# save the result so we can report them on Slack
with open('offline_runners.txt' , 'w' ) as fp:
fp.write(json.dumps(lowerCamelCase_ ) )
if len(lowerCamelCase_ ) > 0:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = '\n'.join([x['name'] for x in offline_runners] )
raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def __UpperCAmelCase ( lowerCamelCase_ : List[str] ) -> Tuple:
"""simple docstring"""
return values.split(',' )
UpperCamelCase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
UpperCamelCase__ : List[Any] = parser.parse_args()
get_runner_status(args.target_runners, args.token)
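The same check can be made without shelling out to curl. A sketch using the requests library against the same GitHub endpoint (the helper name and error handling are assumptions, not part of the script above):

import requests

def fetch_offline_runners(token: str, target_runners: list[str]) -> list[dict]:
    url = "https://api.github.com/repos/huggingface/transformers/actions/runners"
    headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()
    runners = response.json()["runners"]
    return [r for r in runners if r["name"] in target_runners and r["status"] == "offline"]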
| 105
|
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
lowerCamelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ):
if "xprophetnet" in prophetnet_checkpoint_path:
UpperCAmelCase_ = XLMProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = XLMProphetNetForConditionalGeneration.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
else:
UpperCAmelCase_ = ProphetNetForConditionalGenerationOld.from_pretrained(lowerCAmelCase__ )
UpperCAmelCase_ , UpperCAmelCase_ = ProphetNetForConditionalGeneration.from_pretrained(
lowerCAmelCase__ , output_loading_info=lowerCAmelCase__ )
UpperCAmelCase_ = ["key_proj", "value_proj", "query_proj"]
UpperCAmelCase_ = {
"self_attn": "ngram_self_attn",
"cross_attn": "encoder_attn",
"cross_attn_layer_norm": "encoder_attn_layer_norm",
"feed_forward_layer_norm": "final_layer_norm",
"feed_forward": "",
"intermediate": "fc1",
"output": "fc2",
"key_proj": "k_proj",
"query_proj": "q_proj",
"value_proj": "v_proj",
"word_embeddings": "embed_tokens",
"embeddings_layer_norm": "emb_layer_norm",
"relative_pos_embeddings": "relative_linear",
"ngram_embeddings": "ngram_input_embed",
"position_embeddings": "embed_positions",
}
for key in loading_info["missing_keys"]:
UpperCAmelCase_ = key.split("." )
if attributes[0] == "lm_head":
UpperCAmelCase_ = prophet
UpperCAmelCase_ = prophet_old
else:
UpperCAmelCase_ = prophet.prophetnet
UpperCAmelCase_ = prophet_old.model
UpperCAmelCase_ = False
for attribute in attributes:
if attribute in mapping:
UpperCAmelCase_ = mapping[attribute]
if not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) > 0:
UpperCAmelCase_ = attribute
elif hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase_ = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
UpperCAmelCase_ = old_model.weight
logger.info(f"""{attribute} is initialized.""" )
UpperCAmelCase_ = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
UpperCAmelCase_ = old_model.bias
logger.info(f"""{attribute} is initialized""" )
UpperCAmelCase_ = True
break
elif attribute in special_keys and hasattr(lowerCAmelCase__ , "in_proj_weight" ):
UpperCAmelCase_ = old_model.in_proj_weight.shape[0] // 3
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
UpperCAmelCase_ = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
UpperCAmelCase_ = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
UpperCAmelCase_ = nn.Parameter(old_model.embed_positions.weight[:512, :] )
UpperCAmelCase_ = True
break
if attribute.isdigit():
UpperCAmelCase_ = model[int(lowerCAmelCase__ )]
UpperCAmelCase_ = old_model[int(lowerCAmelCase__ )]
else:
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if old_attribute == "":
UpperCAmelCase_ = old_model
else:
if not hasattr(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError(f"""{old_model} does not have {old_attribute}""" )
UpperCAmelCase_ = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if not is_key_init:
raise ValueError(f"""{key} was not correctly initialized!""" )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
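The core move in the conversion above is carving a fused attention projection into separate query/key/value matrices. The same slicing in isolation, with toy dimensions:

import torch
from torch import nn

embed_dim = 8
# fairseq-style fused projection: q, k and v stacked along the first axis
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)

q_proj = nn.Linear(embed_dim, embed_dim, bias=False)
k_proj = nn.Linear(embed_dim, embed_dim, bias=False)
v_proj = nn.Linear(embed_dim, embed_dim, bias=False)

# slice the fused weight into three contiguous blocks, exactly as the script does
q_proj.weight = nn.Parameter(in_proj_weight[:embed_dim, :])
k_proj.weight = nn.Parameter(in_proj_weight[embed_dim : 2 * embed_dim, :])
v_proj.weight = nn.Parameter(in_proj_weight[2 * embed_dim :, :])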
| 82
| 0
|
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def __lowerCamelCase ( _lowercase ) -> List[Any]:
UpperCamelCase = {}
UpperCamelCase = tokenizer(example['content'] , truncation=_lowercase )['input_ids']
UpperCamelCase = len(example['content'] ) / len(output['input_ids'] )
return output
_snake_case = HfArgumentParser(PretokenizationArguments)
_snake_case = parser.parse_args()
if args.num_workers is None:
_snake_case = multiprocessing.cpu_count()
_snake_case = AutoTokenizer.from_pretrained(args.tokenizer_dir)
_snake_case = time.time()
_snake_case = load_dataset(args.dataset_name, split='''train''')
print(F"Dataset loaded in {time.time()-t_start:.2f}s")
_snake_case = time.time()
_snake_case = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'''repo_name''',
'''path''',
'''copies''',
'''size''',
'''content''',
'''license''',
'''hash''',
'''line_mean''',
'''line_max''',
'''alpha_frac''',
'''autogenerated''',
],
)
print(F"Dataset tokenized in {time.time()-t_start:.2f}s")
_snake_case = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(F"Data pushed to the hub in {time.time()-t_start:.2f}s")
| 170
|
from math import factorial, pi
def __lowerCamelCase ( _lowercase , _lowercase = 30 ) -> float:
if not isinstance(_lowercase , (int, float) ):
raise ValueError('maclaurin_sin() requires either an int or float for theta' )
if not isinstance(_lowercase , _lowercase ) or accuracy <= 0:
raise ValueError('maclaurin_sin() requires a positive int for accuracy' )
UpperCamelCase = float(_lowercase )
UpperCamelCase = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(_lowercase ) )
def __lowerCamelCase ( _lowercase , _lowercase = 30 ) -> float:
if not isinstance(_lowercase , (int, float) ):
raise ValueError('maclaurin_cos() requires either an int or float for theta' )
if not isinstance(_lowercase , _lowercase ) or accuracy <= 0:
raise ValueError('maclaurin_cos() requires a positive int for accuracy' )
UpperCamelCase = float(_lowercase )
UpperCamelCase = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(_lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
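Because both functions first reduce theta modulo 2*pi, the truncated series converges fast. A quick check against the standard library, assuming the functions above are available under the names used in the __main__ block (maclaurin_sin / maclaurin_cos; the defs appear renamed here):

import math

for theta in (0.5, 2.0, 10.0, -10.0):
    approx = maclaurin_sin(theta, 15)
    assert math.isclose(approx, math.sin(theta), rel_tol=1e-9), (theta, approx)
print("maclaurin_sin matches math.sin to within 1e-9 on the sampled inputs")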
| 170
| 1
|
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ : List[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class __snake_case ( __magic_name__ , unittest.TestCase ):
__lowerCAmelCase = ReformerTokenizer
__lowerCAmelCase = ReformerTokenizerFast
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = True
def _snake_case ( self ) -> List[str]:
super().setUp()
snake_case__ = ReformerTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self ) -> Optional[Any]:
snake_case__ = '<s>'
snake_case__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ )
def _snake_case ( self ) -> Any:
snake_case__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(UpperCamelCase_ ) , 1000 )
def _snake_case ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _snake_case ( self ) -> Dict:
if not self.test_rust_tokenizer:
return
snake_case__ = self.get_tokenizer()
snake_case__ = self.get_rust_tokenizer()
snake_case__ = 'I was born in 92000, and this is falsé.'
snake_case__ = tokenizer.tokenize(UpperCamelCase_ )
snake_case__ = rust_tokenizer.tokenize(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
snake_case__ = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
snake_case__ = rust_tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
snake_case__ = self.get_rust_tokenizer()
snake_case__ = tokenizer.encode(UpperCamelCase_ )
snake_case__ = rust_tokenizer.encode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_=15 ) -> List[str]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case__ = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
# Simple input
snake_case__ = 'This is a simple input'
snake_case__ = ['This is a simple input 1', 'This is a simple input 2']
snake_case__ = ('This is a simple input', 'This is a pair')
snake_case__ = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(UpperCamelCase_ , tokenizer_r.encode , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='max_length' )
# Simple input
self.assertRaises(UpperCamelCase_ , tokenizer_r.encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='max_length' )
# Simple input
self.assertRaises(
UpperCamelCase_ , tokenizer_r.batch_encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='max_length' , )
# Pair input
self.assertRaises(UpperCamelCase_ , tokenizer_r.encode , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='max_length' )
# Pair input
self.assertRaises(UpperCamelCase_ , tokenizer_r.encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='max_length' )
# Pair input
self.assertRaises(
UpperCamelCase_ , tokenizer_r.batch_encode_plus , UpperCamelCase_ , max_length=UpperCamelCase_ , padding='max_length' , )
def _snake_case ( self ) -> Union[str, Any]:
pass
def _snake_case ( self ) -> str:
snake_case__ = ReformerTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ )
snake_case__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCamelCase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [285, 46, 10, 170, 382] , )
snake_case__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
snake_case__ = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
snake_case__ = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def _snake_case ( self ) -> Optional[int]:
return ReformerTokenizer.from_pretrained('google/reformer-crime-and-punishment' )
@slow
def _snake_case ( self ) -> List[Any]:
snake_case__ = 'Hello World!'
snake_case__ = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(UpperCamelCase_ , self.big_tokenizer.encode(UpperCamelCase_ ) )
@slow
def _snake_case ( self ) -> str:
snake_case__ = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
snake_case__ = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(UpperCamelCase_ , self.big_tokenizer.encode(UpperCamelCase_ ) )
@require_torch
@slow
def _snake_case ( self ) -> str:
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
snake_case__ = list(self.big_tokenizer.get_vocab().keys() )[:10]
snake_case__ = ' '.join(UpperCamelCase_ )
snake_case__ = self.big_tokenizer.encode_plus(UpperCamelCase_ , return_tensors='pt' )
snake_case__ = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors='pt' )
snake_case__ = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
snake_case__ = encoded_sequence['input_ids'].shape
snake_case__ = ReformerModel(UpperCamelCase_ )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**UpperCamelCase_ )
model(**UpperCamelCase_ )
@slow
def _snake_case ( self ) -> Union[str, Any]:
# fmt: off
snake_case__ = {'input_ids': [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
snake_case__ = [
'This is a very simple sentence.',
'The quick brown fox jumps over the lazy dog.',
]
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name='google/reformer-crime-and-punishment' , revision='0e6c3decb8211d49bf881013425dc8b0448b3f5a' , padding=UpperCamelCase_ , sequences=UpperCamelCase_ , )
| 368
|
'''simple docstring'''
import cmath
import math
def __lowerCamelCase ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) ->complex:
snake_case__ = math.radians(UpperCAmelCase_ )
snake_case__ = math.radians(UpperCAmelCase_ )
# Convert voltage and current to rectangular form
snake_case__ = cmath.rect(UpperCAmelCase_ , UpperCAmelCase_ )
snake_case__ = cmath.rect(UpperCAmelCase_ , UpperCAmelCase_ )
# Calculate apparent power
return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
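A short worked example of the complex product returned above, assuming the function is available as apparent_power (it appears renamed here); note that it multiplies the two phasors directly, whereas the textbook definition S = V * conj(I) conjugates the current first:

import cmath

s = apparent_power(100, 5, 0, -30)  # 100 V at 0 degrees, 5 A at -30 degrees
print(abs(s))          # 500.0 -> apparent power magnitude in VA
print(s.real, s.imag)  # ~433.0 and ~-250.0 -> rectangular components
print(cmath.phase(s))  # ~-0.5236 rad, the combined phase angle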
| 368
| 1
|
"""simple docstring"""
import copy
import random
from transformers import CLIPTokenizer
class lowerCAmelCase ( snake_case ):
def __init__( self , *a__ , **a__ ):
super().__init__(*a__ , **a__ )
_UpperCAmelCase = {}
def __A ( self , a__ , *a__ , **a__ ):
_UpperCAmelCase = super().add_tokens(a__ , *a__ , **a__ )
if num_added_tokens == 0:
raise ValueError(
f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
' `placeholder_token` that is not already in the tokenizer.' )
def __A ( self , a__ , *a__ , a__=1 , **a__ ):
_UpperCAmelCase = []
if num_vec_per_token == 1:
self.try_adding_tokens(a__ , *a__ , **a__ )
output.append(a__ )
else:
_UpperCAmelCase = []
for i in range(a__ ):
_UpperCAmelCase = placeholder_token + f"""_{i}"""
self.try_adding_tokens(a__ , *a__ , **a__ )
output.append(a__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"""The tokenizer already has placeholder token {token} that can get confused with"""
f""" {placeholder_token}keep placeholder tokens independent""" )
_UpperCAmelCase = output
def __A ( self , a__ , a__=False , a__=1.0 ):
if isinstance(a__ , a__ ):
_UpperCAmelCase = []
for i in range(len(a__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=a__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
_UpperCAmelCase = self.token_map[placeholder_token]
_UpperCAmelCase = tokens[: 1 + int(len(a__ ) * prop_tokens_to_load )]
if vector_shuffle:
_UpperCAmelCase = copy.copy(a__ )
random.shuffle(a__ )
_UpperCAmelCase = text.replace(a__ , ' '.join(a__ ) )
return text
def __call__( self , a__ , *a__ , a__=False , a__=1.0 , **a__ ):
return super().__call__(
self.replace_placeholder_tokens_in_text(
a__ , vector_shuffle=a__ , prop_tokens_to_load=a__ ) , *a__ , **a__ , )
def __A ( self , a__ , *a__ , a__=False , a__=1.0 , **a__ ):
return super().encode(
self.replace_placeholder_tokens_in_text(
a__ , vector_shuffle=a__ , prop_tokens_to_load=a__ ) , *a__ , **a__ , )
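A usage sketch for this textual-inversion tokenizer wrapper, assuming it is importable as MultiTokenCLIPTokenizer with the original method name add_placeholder_tokens (both renamed above; the class lives in the diffusers textual-inversion examples) and a standard CLIP vocabulary:

# hypothetical import path for the class defined above
tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")

# registers <cat-toy>_0 ... <cat-toy>_3 and maps <cat-toy> onto them
tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)

# the placeholder expands to its four sub-tokens before normal CLIP tokenization
ids = tokenizer("a photo of <cat-toy>", padding="max_length", truncation=True).input_ids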
| 494
|
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def __lowerCamelCase ( SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
for param in module.parameters():
_UpperCAmelCase = False
def __lowerCamelCase ( ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = 'cuda' if torch.cuda.is_available() else 'cpu'
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
_UpperCAmelCase = 'mps'
if device == "mps":
print(
'WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'
' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'
' with generations.' )
return device
def __lowerCamelCase ( SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_UpperCAmelCase = plt.imshow(SCREAMING_SNAKE_CASE )
fig.axes.get_xaxis().set_visible(SCREAMING_SNAKE_CASE )
fig.axes.get_yaxis().set_visible(SCREAMING_SNAKE_CASE )
plt.show()
def __lowerCamelCase ( ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = datetime.now()
_UpperCAmelCase = current_time.strftime('%H:%M:%S' )
return timestamp
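A small sketch of using a freeze helper like the first function above for partial fine-tuning (model and layer choice are illustrative):

import torch
from torch import nn

def freeze_module(module: nn.Module) -> None:
    # same idea as the helper above: stop gradients flowing into these weights
    for param in module.parameters():
        param.requires_grad = False

model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 2))
freeze_module(model[0])  # freeze the first layer, train the rest

trainable = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.AdamW(trainable, lr=1e-4)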
| 494
| 1
|
def snake_case_ ( lowerCAmelCase_ : float , lowerCAmelCase_ : float ):
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
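Plugging rough values for water into the formula above (bulk modulus about 2.15 GPa, density about 998 kg/m^3) lands close to the measured ~1480 m/s. A self-contained restatement with a readable name plus a spot check:

def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5

print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))  # ~1467.8 m/s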
| 149
|
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowerCAmelCase :
'''simple docstring'''
def __init__( self : List[str] , __a : Optional[int] , __a : List[str]=14 , __a : Optional[Any]=7 , __a : List[Any]=True , __a : Tuple=True , __a : Union[str, Any]=True , __a : Any=True , __a : Any=True , __a : Dict=99 , __a : List[Any]=32 , __a : Union[str, Any]=5 , __a : List[Any]=4 , __a : Tuple=37 , __a : Dict="gelu" , __a : Tuple=0.1 , __a : str=0.1 , __a : Optional[int]=512 , __a : Union[str, Any]=16 , __a : Tuple=2 , __a : Tuple=0.02 , __a : List[str]=3 , __a : Tuple=4 , __a : int=None , ) -> int:
"""simple docstring"""
__lowercase : Tuple = parent
__lowercase : Optional[int] = batch_size
__lowercase : int = seq_length
__lowercase : Any = is_training
__lowercase : str = use_token_type_ids
__lowercase : Dict = use_input_mask
__lowercase : Tuple = use_labels
__lowercase : Optional[Any] = use_mc_token_ids
__lowercase : int = vocab_size
__lowercase : Optional[int] = hidden_size
__lowercase : int = num_hidden_layers
__lowercase : Tuple = num_attention_heads
__lowercase : Any = intermediate_size
__lowercase : Any = hidden_act
__lowercase : Optional[Any] = hidden_dropout_prob
__lowercase : Dict = attention_probs_dropout_prob
__lowercase : str = max_position_embeddings
__lowercase : List[Any] = type_vocab_size
__lowercase : List[str] = type_sequence_label_size
__lowercase : Optional[Any] = initializer_range
__lowercase : List[Any] = num_labels
__lowercase : str = num_choices
__lowercase : List[str] = scope
__lowercase : Optional[Any] = self.vocab_size - 1
def lowerCAmelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
__lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : int = None
if self.use_input_mask:
__lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Tuple = None
if self.use_token_type_ids:
__lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : Dict = None
if self.use_mc_token_ids:
__lowercase : int = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__lowercase : Tuple = None
__lowercase : int = None
__lowercase : Any = None
if self.use_labels:
__lowercase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : int = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : Dict = self.get_config()
__lowercase : Union[str, Any] = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase ( self : Any ) -> Tuple:
"""simple docstring"""
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowerCAmelCase ( self : List[str] , __a : Tuple , __a : str , __a : Optional[int] , __a : Any , __a : Union[str, Any] , *__a : List[str] ) -> Tuple:
"""simple docstring"""
__lowercase : int = CTRLModel(config=__a )
model.to(__a )
model.eval()
model(__a , token_type_ids=__a , head_mask=__a )
model(__a , token_type_ids=__a )
__lowercase : int = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowerCAmelCase ( self : Any , __a : Union[str, Any] , __a : str , __a : List[Any] , __a : Union[str, Any] , __a : Optional[Any] , *__a : List[Any] ) -> Tuple:
"""simple docstring"""
__lowercase : str = CTRLLMHeadModel(__a )
model.to(__a )
model.eval()
__lowercase : Any = model(__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : List[str] = self.prepare_config_and_inputs()
(
(
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) , (
__lowercase
) ,
) : int = config_and_inputs
__lowercase : Optional[int] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
def lowerCAmelCase ( self : int , __a : int , __a : Dict , __a : str , __a : List[str] , *__a : str ) -> int:
"""simple docstring"""
__lowercase : List[str] = self.num_labels
__lowercase : Optional[Any] = CTRLForSequenceClassification(__a )
model.to(__a )
model.eval()
__lowercase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : List[str] = model(__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class lowerCAmelCase ( __a , __a , __a , unittest.TestCase ):
'''simple docstring'''
_A : Dict = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
_A : Any = (CTRLLMHeadModel,) if is_torch_available() else ()
_A : Dict = (
{
'''feature-extraction''': CTRLModel,
'''text-classification''': CTRLForSequenceClassification,
'''text-generation''': CTRLLMHeadModel,
'''zero-shot''': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : str = True
_A : List[Any] = False
_A : List[Any] = False
def lowerCAmelCase ( self : int , __a : Tuple , __a : int , __a : str , __a : int , __a : Dict ) -> Dict:
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCAmelCase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
__lowercase : Tuple = CTRLModelTester(self )
__lowercase : Any = ConfigTester(self , config_class=__a , n_embd=37 )
def lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__a )
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__a )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
pass
@slow
def lowerCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase : List[Any] = CTRLModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
pass
@require_torch
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase : int = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(__a )
__lowercase : str = torch.tensor(
[[11859, 0, 1611, 8]] , dtype=torch.long , device=__a ) # Legal the president is
__lowercase : Union[str, Any] = [
11859,
0,
1611,
8,
5,
150,
26449,
2,
19,
348,
469,
3,
2595,
48,
20740,
246533,
246533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__lowercase : List[Any] = model.generate(__a , do_sample=__a )
self.assertListEqual(output_ids[0].tolist() , __a )
| 149
| 1
|
from collections import Counter
from timeit import timeit
def __lowerCamelCase ( __lowerCAmelCase : str = "" , ) -> bool:
return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2
def __lowerCamelCase ( __lowerCAmelCase : str = "" ) -> bool:
if len(__lowerCAmelCase ) == 0:
return True
__UpperCamelCase : Dict = input_str.replace(""" """ , """""" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
__UpperCamelCase : dict[str, int] = {}
for character in lower_case_input_str:
__UpperCamelCase : Dict = character_freq_dict.get(__lowerCAmelCase , 0 ) + 1
__UpperCamelCase : List[Any] = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def __lowerCamelCase ( __lowerCAmelCase : str = "" ) -> None:
print("""\nFor string = """ , __lowerCAmelCase , """:""" )
print(
"""> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(__lowerCAmelCase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
print(
"""> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(__lowerCAmelCase ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
if __name__ == "__main__":
UpperCamelCase = input(
'Enter string to determine if it can be rearranged as a palindrome or not: '
).strip()
benchmark(check_str)
UpperCamelCase = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
| 708
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json',
}
class _A ( UpperCAmelCase_ ):
lowercase_ : Union[str, Any] = '''gpt_neox_japanese'''
def __init__( self : Optional[Any] , lowerCamelCase__ : List[Any]=3_20_00 , lowerCamelCase__ : Any=25_60 , lowerCamelCase__ : str=32 , lowerCamelCase__ : Union[str, Any]=32 , lowerCamelCase__ : str=4 , lowerCamelCase__ : Tuple="gelu" , lowerCamelCase__ : str=1.00 , lowerCamelCase__ : str=1_00_00 , lowerCamelCase__ : Tuple=20_48 , lowerCamelCase__ : Any=0.02 , lowerCamelCase__ : List[Any]=1e-5 , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : int=3_19_96 , lowerCamelCase__ : Optional[Any]=3_19_99 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : List[Any]=0.0 , **lowerCamelCase__ : str , ):
"""simple docstring"""
super().__init__(bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
__UpperCamelCase : List[Any] = vocab_size
__UpperCamelCase : List[Any] = max_position_embeddings
__UpperCamelCase : Dict = hidden_size
__UpperCamelCase : Dict = num_hidden_layers
__UpperCamelCase : List[Any] = num_attention_heads
__UpperCamelCase : List[str] = intermediate_multiple_size
__UpperCamelCase : List[str] = hidden_act
__UpperCamelCase : str = rotary_pct
__UpperCamelCase : Optional[int] = rotary_emb_base
__UpperCamelCase : List[Any] = initializer_range
__UpperCamelCase : Union[str, Any] = layer_norm_eps
__UpperCamelCase : Optional[int] = use_cache
__UpperCamelCase : Optional[int] = attention_dropout
__UpperCamelCase : int = hidden_dropout
| 515
| 0
|
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__snake_case : Any = logging.get_logger(__name__)
__snake_case : Any = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
__snake_case : List[str] = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def a_ ( __a ):
A__ = {}
with open(__a , '''r''' ) as file:
for line_number, line in enumerate(__a ):
A__ = line.strip()
if line:
A__ = line.split()
A__ = line_number
A__ = words[0]
A__ = value
return result
def a_ ( __a , __a , __a , __a , __a ):
for attribute in key.split('''.''' ):
A__ = getattr(__a , __a )
A__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__a ):
A__ = PARAM_MAPPING[full_name.split('''.''' )[-1]]
A__ = '''param'''
if weight_type is not None and weight_type != "param":
A__ = getattr(__a , __a ).shape
elif weight_type is not None and weight_type == "param":
A__ = hf_pointer
for attribute in hf_param_name.split('''.''' ):
A__ = getattr(__a , __a )
A__ = shape_pointer.shape
# let's reduce dimension
A__ = value[0]
else:
A__ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
A__ = value
elif weight_type == "weight_g":
A__ = value
elif weight_type == "weight_v":
A__ = value
elif weight_type == "bias":
A__ = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
A__ = getattr(__a , __a )
A__ = value
else:
A__ = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def a_ ( __a , __a , __a , __a , __a ):
A__ = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__a ):
A__ = PARAM_MAPPING[full_name.split('''.''' )[-1]]
A__ = '''param'''
if weight_type is not None and weight_type != "param":
A__ = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
A__ = '''.'''.join([key, hf_param_name] )
else:
A__ = key
A__ = value if '''lm_head''' in full_key else value[0]
__snake_case : List[Any] = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def a_ ( __a , __a , __a=None , __a=None ):
A__ = False
for key, mapped_key in MAPPING.items():
A__ = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
A__ = True
if "*" in mapped_key:
A__ = name.split(__a )[0].split('''.''' )[-2]
A__ = mapped_key.replace('''*''' , __a )
if "weight_g" in name:
A__ = '''weight_g'''
elif "weight_v" in name:
A__ = '''weight_v'''
elif "bias" in name:
A__ = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
A__ = '''weight'''
else:
A__ = None
if hf_dict is not None:
rename_dict(__a , __a , __a , __a , __a )
else:
set_recursively(__a , __a , __a , __a , __a )
return is_used
return is_used
def a_ ( __a , __a , __a ):
A__ = []
A__ = fairseq_model.state_dict()
A__ = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
A__ = False
if "conv_layers" in name:
load_conv_layer(
__a , __a , __a , __a , hf_model.config.feat_extract_norm == '''group''' , )
A__ = True
else:
A__ = load_wavaveca_layer(__a , __a , __a )
if not is_used:
unused_weights.append(__a )
logger.warning(f'''Unused weights: {unused_weights}''' )
def a_ ( __a , __a , __a , __a , __a ):
A__ = full_name.split('''conv_layers.''' )[-1]
A__ = name.split('''.''' )
A__ = int(items[0] )
A__ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
A__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
A__ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
A__ = value
logger.info(f'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
A__ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__a )
@torch.no_grad()
def a_ ( __a , __a , __a=None , __a=None , __a=True , __a=False ):
if config_path is not None:
A__ = WavaVecaConfig.from_pretrained(__a )
else:
A__ = WavaVecaConfig()
if is_seq_class:
A__ = read_txt_into_dict(__a )
A__ = idalabel
A__ = WavaVecaForSequenceClassification(__a )
A__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
feature_extractor.save_pretrained(__a )
elif is_finetuned:
if dict_path:
A__ = Dictionary.load(__a )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
A__ = target_dict.pad_index
A__ = target_dict.bos_index
A__ = target_dict.eos_index
A__ = len(target_dict.symbols )
A__ = os.path.join(__a , '''vocab.json''' )
if not os.path.isdir(__a ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__a ) )
return
os.makedirs(__a , exist_ok=__a )
A__ = target_dict.indices
# fairseq has the <pad> and <s> switched
A__ = 0
A__ = 1
with open(__a , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(__a , __a )
A__ = WavaVecaCTCTokenizer(
__a , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__a , )
A__ = True if config.feat_extract_norm == '''layer''' else False
A__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__a , return_attention_mask=__a , )
A__ = WavaVecaProcessor(feature_extractor=__a , tokenizer=__a )
processor.save_pretrained(__a )
A__ = WavaVecaForCTC(__a )
else:
A__ = WavaVecaForPreTraining(__a )
if is_finetuned or is_seq_class:
A__ , A__ , A__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
A__ = argparse.Namespace(task='''audio_pretraining''' )
A__ = fairseq.tasks.setup_task(__a )
A__ , A__ , A__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__a )
A__ = model[0].eval()
recursively_load_weights(__a , __a , not is_finetuned )
hf_wavavec.save_pretrained(__a )
if __name__ == "__main__":
__snake_case : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
__snake_case : str = parser.parse_args()
__snake_case : List[Any] = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
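The conversion above repeatedly walks dotted key paths with getattr to reach the right parameter. The pattern in isolation, on a toy module with an illustrative key:

import torch
from torch import nn

model = nn.Sequential(nn.Linear(4, 4))

def get_by_path(root, dotted_key: str):
    # walk 'a.b.c' one attribute (or integer index) at a time, as set_recursively does
    obj = root
    for part in dotted_key.split("."):
        obj = obj[int(part)] if part.isdigit() else getattr(obj, part)
    return obj

weight = get_by_path(model, "0.weight")
weight.data = torch.zeros_like(weight.data)  # assign through the resolved pointer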
| 571
|
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__snake_case : Union[str, Any] = logging.get_logger(__name__)
__snake_case : List[Any] = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
__snake_case : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name( class_name ):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(f'''.{module_name}''' , '''transformers.models''' )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor , '''__name__''' , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('''transformers''' )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
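# Usage sketch for the resolver above (assuming a transformers install and a
# class name taken from the registry at the top of this file):
#
#   cls = image_processor_class_from_name("ViTImageProcessor")
#   print(cls)  # the concrete class, or None when the name is unknown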
def get_image_processor_config( pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            '''Could not locate the image processor configuration file, will try to use the model config instead.''' )
        return {}
    with open(resolved_config_file , encoding='''utf-8''' ) as reader:
        return json.load(reader )
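# Sketch for the config helper above (the repo id is illustrative); it returns
# the parsed image processor JSON as a plain dict, or {} when no file is found:
#
#   config_dict = get_image_processor_config("google/vit-base-patch16-224")
#   print(config_dict.get("image_processor_type"))  # e.g. "ViTImageProcessor"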
class AutoImageProcessor :
"""simple docstring"""
def __init__( self : List[str] ):
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(_lowerCamelCase )
def A__ ( cls : str , _lowerCamelCase : Dict , **_lowerCamelCase : List[str] ):
A__ = kwargs.pop('''config''' , _lowerCamelCase )
A__ = kwargs.pop('''trust_remote_code''' , _lowerCamelCase )
A__ = True
A__ , A__ = ImageProcessingMixin.get_image_processor_dict(_lowerCamelCase , **_lowerCamelCase )
A__ = config_dict.get('''image_processor_type''' , _lowerCamelCase )
A__ = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
A__ = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
A__ = config_dict.pop('''feature_extractor_type''' , _lowerCamelCase )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
A__ = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
A__ = config_dict['''auto_map''']['''AutoFeatureExtractor''']
A__ = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
A__ = AutoConfig.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
            # It could be in `config.image_processor_type`
A__ = getattr(_lowerCamelCase , '''image_processor_type''' , _lowerCamelCase )
if hasattr(_lowerCamelCase , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
A__ = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
A__ = image_processor_class_from_name(_lowerCamelCase )
A__ = image_processor_auto_map is not None
A__ = image_processor_class is not None or type(_lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING
A__ = resolve_trust_remote_code(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if has_remote_code and trust_remote_code:
A__ = get_class_from_dynamic_module(
_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
A__ = kwargs.pop('''code_revision''' , _lowerCamelCase )
if os.path.isdir(_lowerCamelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_lowerCamelCase , **_lowerCamelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(_lowerCamelCase , **_lowerCamelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING:
A__ = IMAGE_PROCESSOR_MAPPING[type(_lowerCamelCase )]
return image_processor_class.from_dict(_lowerCamelCase , **_lowerCamelCase )
raise ValueError(
F'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
F'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def A__ ( _lowerCamelCase : str , _lowerCamelCase : str ):
IMAGE_PROCESSOR_MAPPING.register(_lowerCamelCase , _lowerCamelCase )
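# End-to-end usage, assuming a standard transformers install (the class above is
# exported as transformers.AutoImageProcessor):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   # resolves to ViTImageProcessor through the mapping defined at the top of this file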
| 571
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Tuple , lowercase__ : Optional[int] , lowercase__ : List[Any]=3 , lowercase__ : Optional[int]=32 , lowercase__ : Optional[Any]=3 , lowercase__ : int=10 , lowercase__ : str=[10, 20, 30, 40] , lowercase__ : Any=[1, 1, 2, 1] , lowercase__ : List[Any]=True , lowercase__ : Tuple=True , lowercase__ : List[str]="relu" , lowercase__ : Tuple=3 , lowercase__ : List[str]=None , ) ->List[str]:
'''simple docstring'''
_UpperCamelCase : Tuple = parent
_UpperCamelCase : List[Any] = batch_size
_UpperCamelCase : int = image_size
_UpperCamelCase : Optional[int] = num_channels
_UpperCamelCase : List[str] = embeddings_size
_UpperCamelCase : List[Any] = hidden_sizes
_UpperCamelCase : str = depths
_UpperCamelCase : str = is_training
_UpperCamelCase : Tuple = use_labels
_UpperCamelCase : str = hidden_act
_UpperCamelCase : str = num_labels
_UpperCamelCase : List[Any] = scope
_UpperCamelCase : int = len(lowercase__ )
def snake_case__ ( self : Any ) ->Optional[int]:
'''simple docstring'''
_UpperCamelCase : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCamelCase : int = self.get_config()
return config, pixel_values
def snake_case__ ( self : List[str] ) ->List[str]:
'''simple docstring'''
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def snake_case__ ( self : Any , lowercase__ : Dict , lowercase__ : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCamelCase : Tuple = FlaxRegNetModel(config=lowercase__ )
_UpperCamelCase : Tuple = model(lowercase__ )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def snake_case__ ( self : Optional[int] , lowercase__ : Optional[Any] , lowercase__ : Tuple ) ->Tuple:
'''simple docstring'''
_UpperCamelCase : Optional[Any] = self.num_labels
_UpperCamelCase : Tuple = FlaxRegNetForImageClassification(config=lowercase__ )
_UpperCamelCase : Any = model(lowercase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case__ ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCamelCase : Any = self.prepare_config_and_inputs()
_UpperCamelCase : List[Any] = config_and_inputs
_UpperCamelCase : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class FlaxRegNetModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
UpperCAmelCase__ = False
UpperCAmelCase__ = False
UpperCAmelCase__ = False
def snake_case__ ( self : str ) ->None:
'''simple docstring'''
_UpperCamelCase : int = FlaxRegNetModelTester(self )
_UpperCamelCase : str = ConfigTester(self , config_class=lowercase__ , has_text_modality=lowercase__ )
def snake_case__ ( self : str ) ->List[str]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case__ ( self : List[str] ) ->List[str]:
'''simple docstring'''
return
def snake_case__ ( self : Tuple ) ->str:
'''simple docstring'''
_UpperCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase__ )
def snake_case__ ( self : Union[str, Any] ) ->List[str]:
'''simple docstring'''
_UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase__ )
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def snake_case__ ( self : int ) ->str:
'''simple docstring'''
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def snake_case__ ( self : List[str] ) ->Dict:
'''simple docstring'''
pass
def snake_case__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = model_class(lowercase__ )
_UpperCamelCase : Any = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCamelCase : Tuple = [*signature.parameters.keys()]
_UpperCamelCase : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase__ )
def snake_case__ ( self : Optional[int] ) ->Dict:
'''simple docstring'''
def check_hidden_states_output(lowercase__ : Union[str, Any] , lowercase__ : Optional[int] , lowercase__ : Optional[int] ):
_UpperCamelCase : Tuple = model_class(lowercase__ )
_UpperCamelCase : List[str] = model(**self._prepare_for_class(lowercase__ , lowercase__ ) )
_UpperCamelCase : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCamelCase : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(lowercase__ ) , expected_num_stages + 1 )
_UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCamelCase : Optional[int] = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCamelCase : str = True
check_hidden_states_output(lowercase__ , lowercase__ , lowercase__ )
def snake_case__ ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
_UpperCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCamelCase : List[Any] = self._prepare_for_class(lowercase__ , lowercase__ )
_UpperCamelCase : Union[str, Any] = model_class(lowercase__ )
@jax.jit
def model_jitted(lowercase__ : Optional[int] , **lowercase__ : Dict ):
return model(pixel_values=lowercase__ , **lowercase__ )
with self.subTest("JIT Enabled" ):
_UpperCamelCase : str = model_jitted(**lowercase__ ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
_UpperCamelCase : Any = model_jitted(**lowercase__ ).to_tuple()
self.assertEqual(len(lowercase__ ) , len(lowercase__ ) )
for jitted_output, output in zip(lowercase__ , lowercase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def __A ( ) -> int:
'''simple docstring'''
_UpperCamelCase : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_flax
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self : Tuple ) ->Dict:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("facebook/regnet-y-040" ) if is_vision_available() else None
@slow
def snake_case__ ( self : Any ) ->Any:
'''simple docstring'''
_UpperCamelCase : str = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040" )
_UpperCamelCase : Tuple = self.default_image_processor
_UpperCamelCase : Optional[int] = prepare_img()
_UpperCamelCase : int = image_processor(images=lowercase__ , return_tensors="np" )
_UpperCamelCase : str = model(**lowercase__ )
# verify the logits
_UpperCamelCase : Tuple = (1, 1_000)
self.assertEqual(outputs.logits.shape , lowercase__ )
_UpperCamelCase : List[str] = jnp.array([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , lowercase__ , atol=1e-4 ) )
| 702
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_speech_to_text"""] = ["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_speech_to_text"""] = ["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_speech_to_text"""] = [
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_speech_to_text"""] = [
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
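# Runtime behaviour sketch: _LazyModule records only the string structure above,
# so importing this package stays cheap and each submodule is imported on first
# attribute access (assuming transformers is installed):
#
#   from transformers.models import speech_to_text as s2t
#   cfg_cls = s2t.Speech2TextConfig  # the configuration module loads only here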
| 204
| 0
|
"""simple docstring"""
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer ( Trainer ):
'''simple docstring'''
def __init__( self ,lowerCamelCase_=None ,lowerCamelCase_=None ,*lowerCamelCase_ ,**lowerCamelCase_ ) -> Optional[int]:
super().__init__(*lowerCamelCase_ ,**lowerCamelCase_ )
if config is None:
assert isinstance(self.model ,lowerCamelCase_ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f' {self.model.__class__}'
)
A = self.model.config
else:
A = config
A = data_args
A = self.config.tgt_vocab_size if isinstance(self.config ,lowerCamelCase_ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
""" padding..""" )
if self.args.label_smoothing == 0:
A = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
A = label_smoothed_nll_loss
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> List[Any]:
if self.optimizer is None:
A = ["""bias""", """LayerNorm.weight"""]
A = [
{
"""params""": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"""weight_decay""": self.args.weight_decay,
},
{
"""params""": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"""weight_decay""": 0.0,
},
]
A = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
A = Adafactor
A = {"""scale_parameter""": False, """relative_step""": False}
else:
A = AdamW
A = {
"""betas""": (self.args.adam_betaa, self.args.adam_betaa),
"""eps""": self.args.adam_epsilon,
}
A = self.args.learning_rate
if self.sharded_ddp:
A = OSS(
params=lowerCamelCase_ ,optim=lowerCamelCase_ ,**lowerCamelCase_ ,)
else:
A = optimizer_cls(lowerCamelCase_ ,**lowerCamelCase_ )
if self.lr_scheduler is None:
A = self._get_lr_scheduler(lowerCamelCase_ )
else: # ignoring --lr_scheduler
logger.warning("""scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.""" )
def UpperCamelCase__ ( self ,lowerCamelCase_ ) -> Union[str, Any]:
A = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
A = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
A = schedule_func(self.optimizer ,num_warmup_steps=self.args.warmup_steps )
else:
A = schedule_func(
self.optimizer ,num_warmup_steps=self.args.warmup_steps ,num_training_steps=lowerCamelCase_ )
return scheduler
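    # Dispatch sketch for the arg_to_scheduler table above, outside the trainer
    # (the optimizer and the step counts are illustrative):
    #
    #   import torch
    #   from transformers.optimization import get_linear_schedule_with_warmup
    #   opt = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)
    #   sched = get_linear_schedule_with_warmup(opt, num_warmup_steps=10, num_training_steps=100)
    #   sched.step()  # advance the warmup/decay schedule by one optimizer step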
def UpperCamelCase__ ( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset ,torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size ,distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) ,)
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> Optional[int]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
A = model(**lowerCamelCase_ ,use_cache=lowerCamelCase_ )[0]
A = self.loss_fn(logits.view(-1 ,logits.shape[-1] ) ,labels.view(-1 ) )
else:
# compute usual loss via models
A , A = model(**lowerCamelCase_ ,labels=lowerCamelCase_ ,use_cache=lowerCamelCase_ )[:2]
else:
# compute label smoothed loss
A = model(**lowerCamelCase_ ,use_cache=lowerCamelCase_ )[0]
A = torch.nn.functional.log_softmax(lowerCamelCase_ ,dim=-1 )
A , A = self.loss_fn(lowerCamelCase_ ,lowerCamelCase_ ,self.args.label_smoothing ,ignore_index=self.config.pad_token_id )
return loss, logits
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ) -> Optional[Any]:
A = inputs.pop("""labels""" )
A , A = self._compute_loss(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
return loss
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = None ,) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
A = self._prepare_inputs(lowerCamelCase_ )
A = {
"""max_length""": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"""num_beams""": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
A = self.model.generate(
inputs["""input_ids"""] ,attention_mask=inputs["""attention_mask"""] ,**lowerCamelCase_ ,)
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
A = self._pad_tensors_to_max_len(lowerCamelCase_ ,gen_kwargs["""max_length"""] )
A = inputs.pop("""labels""" )
with torch.no_grad():
# compute loss on predict data
A , A = self._compute_loss(lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ )
A = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
A = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
A = self._pad_tensors_to_max_len(lowerCamelCase_ ,gen_kwargs["""max_length"""] )
return (loss, logits, labels)
def UpperCamelCase__ ( self ,lowerCamelCase_ ,lowerCamelCase_ ) -> Any:
# If PAD token is not defined at least EOS token has to be defined
A = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"""Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"""
f' padded to `max_length`={max_length}' )
A = pad_token_id * torch.ones(
(tensor.shape[0], max_length) ,dtype=tensor.dtype ,device=tensor.device )
A = tensor
return padded_tensor
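# The padding trick above, in isolation: allocate a (batch, max_length) tensor
# pre-filled with the pad id, then copy the shorter tensor into its left slice
# (the values below are illustrative):
#
#   import torch
#   tensor, pad_token_id, max_length = torch.tensor([[5, 6, 7]]), 0, 5
#   padded = pad_token_id * torch.ones((tensor.shape[0], max_length), dtype=tensor.dtype)
#   padded[:, : tensor.shape[-1]] = tensor  # -> tensor([[5, 6, 7, 0, 0]])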
| 617
|
"""simple docstring"""
from __future__ import annotations
def _A ( numsa : list[float] , numsb : list[float] ):
    """simple docstring"""
    all_numbers = sorted(numsa + numsb )
    div, mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
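# Worked example: merging [1, 3] and [2] gives [1, 2, 3]; divmod(3, 2) is
# (1, 1), so the odd branch returns all_numbers[1] == 2. For [1, 2] and
# [3, 4], divmod(4, 2) is (2, 0) and the even branch returns (3 + 2) / 2 == 2.5.
#
#   assert _A([1, 3], [2]) == 2 and _A([1, 2], [3, 4]) == 2.5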
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase =[float(x) for x in input("Enter the elements of first array: ").split()]
UpperCAmelCase =[float(x) for x in input("Enter the elements of second array: ").split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
| 617
| 1
|
from math import sqrt
def sum_of_divisors( n ) -> int:
    '''simple docstring'''
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
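# Worked example: the proper divisors of 220 sum to 284 and those of 284 sum
# back to 220, the classic amicable pair counted by solution() below:
#
#   assert sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220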
def solution( __snake_case = 1_0_0_0_0 ) -> int:
    '''simple docstring'''
    total = sum(
        i
        for i in range(1 , __snake_case )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 559
|
import argparse
__a : int = """docs/source/_static/js/custom.js"""
def update_custom_js(version):
    '''simple docstring'''
    with open(__a , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith('const stableVersion =' ):
        index += 1
    lines[index] = F'''const stableVersion = "v{version}"\n'''
    # Then update the dictionary
    while not lines[index].startswith('const versionMapping = {' ):
        index += 1
    # We go until the end
    while not lines[index].startswith('}' ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += F'''    "v{version}": "v{version}",\n'''
    with open(__a , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.writelines(lines )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("""--version""", help="""Release version.""")
    args = parser.parse_args()
update_custom_js(args.version)
| 559
| 1
|
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'4S 3H 2C 7S 5H',
'9D 8H 2C 6S 7H',
'2D 6D 9D TH 7D',
'TC 8C 2S JH 6C',
'JH 8S TH AH QH',
'TS KS 5S 9S AC',
'KD 6S 9D TH AD',
'KS 8D 4D 9S 4S', # pair
'8C 4S KH JS 4D', # pair
'QH 8H KD JH 8S', # pair
'KC 4H KS 2H 8D', # pair
'KD 4S KC 3H 8S', # pair
'AH 8S AS KC JH', # pair
'3H 4C 4H 3S 2H', # 2 pairs
'5S 5D 2C KH KH', # 2 pairs
'3C KH 5D 5S KH', # 2 pairs
'AS 3C KH AD KH', # 2 pairs
'7C 7S 3S 7H 5S', # 3 of a kind
'7C 7S KH 2H 7H', # 3 of a kind
'AC KH QH AH AS', # 3 of a kind
'2H 4D 3C AS 5S', # straight (low ace)
'3C 5C 4C 2C 6H', # straight
'6S 8S 7S 5H 9H', # straight
'JS QS 9H TS KH', # straight
'QC KH TS JS AH', # straight (high ace)
'8C 9C 5C 3C TC', # flush
'3S 8S 9S 5S KS', # flush
'4C 5C 9C 8C KC', # flush
'JH 8H AH KH QH', # flush
'3D 2H 3H 2C 2D', # full house
'2H 2C 3S 3H 3D', # full house
'KH KC 3S 3H 3D', # full house
'JC 6H JS JD JH', # 4 of a kind
'JC 7H JS JD JH', # 4 of a kind
'JC KH JS JD JH', # 4 of a kind
'2S AS 4S 5S 3S', # straight flush (low ace)
'2D 6D 3D 4D 5D', # straight flush
'5C 6C 3C 7C 4C', # straight flush
'JH 9H TH KH QH', # straight flush
'JH AH TH KH QH', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('2H 3H 4H 5H 6H', 'KS AS TS QS JS', 'Loss'),
('2H 3H 4H 5H 6H', 'AS AD AC AH JD', 'Win'),
('AS AH 2H AD AC', 'JS JD JC JH 3D', 'Win'),
('2S AH 2H AS AC', 'JS JD JC JH AD', 'Loss'),
('2S AH 2H AS AC', '2H 3H 5H 6H 7H', 'Win'),
('AS 3S 4S 8S 2S', '2H 3H 5H 6H 7H', 'Win'),
('2H 3H 5H 6H 7H', '2S 3H 4H 5S 6C', 'Win'),
('2S 3H 4H 5S 6C', '3D 4C 5H 6H 2S', 'Tie'),
('2S 3H 4H 5S 6C', 'AH AC 5H 6H AS', 'Win'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H AS', 'Loss'),
('2S 2H 4H 5S 4C', 'AH AC 5H 6H 7S', 'Win'),
('6S AD 7H 4S AS', 'AH AC 5H 6H 7S', 'Loss'),
('2S AH 4H 5S KC', 'AH AC 5H 6H 7S', 'Loss'),
('2S 3H 6H 7S 9C', '7H 3C TH 6H 9S', 'Loss'),
('4S 5H 6H TS AC', '3S 5H 6H TS AC', 'Win'),
('2S AH 4H 5S 6C', 'AD 4C 5H 6H 2C', 'Tie'),
('AS AH 3H AD AC', 'AS AH 2H AD AC', 'Win'),
('AH AC 5H 5C QS', 'AH AC 5H 5C KS', 'Loss'),
('AH AC 5H 5C QS', 'KH KC 5H 5C QS', 'Win'),
('7C 7S KH 2H 7H', '3C 3S AH 2H 3H', 'Win'),
('3C 3S AH 2H 3H', '7C 7S KH 2H 7H', 'Loss'),
('6H 5H 4H 3H 2H', '5H 4H 3H 2H AH', 'Win'),
('5H 4H 3H 2H AH', '5H 4H 3H 2H AH', 'Tie'),
('5H 4H 3H 2H AH', '6H 5H 4H 3H 2H', 'Loss'),
('AH AD KS KC AC', 'AH KD KH AC KC', 'Win'),
('2H 4D 3C AS 5S', '2H 4D 3C 6S 5S', 'Loss'),
('2H 3S 3C 3H 2S', '3S 3C 2S 2H 2D', 'Win'),
('4D 6D 5D 2D JH', '3S 8S 3H TC KH', 'Loss'),
('4S 6C 8S 3S 7S', 'AD KS 2D 7D 7C', 'Loss'),
('6S 4C 7H 8C 3H', '5H JC AH 9D 9C', 'Loss'),
('9D 9H JH TC QH', '3C 2S JS 5C 7H', 'Win'),
('2H TC 8S AD 9S', '4H TS 7H 2C 5C', 'Win'),
('9D 3S 2C 7S 7C', 'JC TD 3C TC 9H', 'Loss'),
)
TEST_FLUSH = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', True),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', False),
('AS 3S 4S 8S 2S', True),
)
TEST_STRAIGHT = (
('2H 3H 4H 5H 6H', True),
('AS AH 2H AD AC', False),
('2H 3H 5H 6H 7H', False),
('KS AS TS QS JS', True),
('8H 9H QS JS TH', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('2H 4D 3C AS 5S', True, [5, 4, 3, 2, 1_4]),
('2H 5D 3C AS 5S', False, [1_4, 5, 5, 3, 2]),
('JH QD KC AS TS', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('9D 3S 2C 7S 7C', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('JH AH TH KH QH', 0),
('JH 9H TH KH QH', 0),
('JC KH JS JD JH', 7),
('KH KC 3S 3H 3D', 6),
('8C 9C 5C 3C TC', 0),
('JS QS 9H TS KH', 0),
('7C 7S KH 2H 7H', 3),
('3C KH 5D 5S KH', 2),
('QH 8H KD JH 8S', 1),
('2D 6D 9D TH 7D', 0),
)
TEST_TYPES = (
('JH AH TH KH QH', 2_3),
('JH 9H TH KH QH', 2_2),
('JC KH JS JD JH', 2_1),
('KH KC 3S 3H 3D', 2_0),
('8C 9C 5C 3C TC', 1_9),
('JS QS 9H TS KH', 1_8),
('7C 7S KH 2H 7H', 1_7),
('3C KH 5D 5S KH', 1_6),
('QH 8H KD JH 8S', 1_5),
('2D 6D 9D TH 7D', 1_4),
)
def generate_random_hand():
    '''simple docstring'''
    play, oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
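# The branchless index trick above maps a pairwise comparison onto 0/1/2:
# (play >= oppo) + (play > oppo) is 0 when play < oppo ('Loss'), 1 on a tie,
# and 2 when play > oppo ('Win'), since SORTED_HANDS is ordered weakest first:
#
#   assert (3 >= 5) + (3 > 5) == 0 and (5 >= 5) + (5 > 5) == 1 and (7 >= 5) + (7 > 5) == 2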
def generate_random_hands(number_of_hands = 100 ):
    '''simple docstring'''
    return (generate_random_hand() for _ in range(number_of_hands ))
@pytest.mark.parametrize('hand, expected' , TEST_FLUSH )
def test_hand_is_flush(hand , expected ):
    '''simple docstring'''
    assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , TEST_STRAIGHT )
def test_hand_is_straight(hand , expected ):
    '''simple docstring'''
    assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight(hand , expected , card_values ):
    '''simple docstring'''
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , TEST_KIND )
def test_hand_is_same_kind(hand , expected ):
    '''simple docstring'''
    assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , TEST_TYPES )
def test_hand_values(hand , expected ):
    '''simple docstring'''
    assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , TEST_COMPARE )
def test_compare_simple(hand , other , expected ):
    '''simple docstring'''
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def test_compare_random(hand , other , expected ):
    '''simple docstring'''
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def test_hand_sorted():
    '''simple docstring'''
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    '''simple docstring'''
    pokerhands = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    '''simple docstring'''
    pokerhand = PokerHand('2C 4S AS 3D 5C' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    '''simple docstring'''
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_path = os.path.join(script_dir , 'poker_hands.txt' )
    with open(poker_hands_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 376
| 259
|
from collections.abc import Callable
import numpy as np
def __a ( ode_func : Callable , ya : float , xa : float , step_size : float , x_end : float ):
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        # predictor step (explicit Euler)
        y_pred = y[k] + step_size * ode_func(x , y[k] )
        # corrector step: trapezoidal average of the slopes at both ends
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_pred ))
        )
        x += step_size
    return y
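# Worked step of the predictor-corrector loop above (Heun's scheme): for
# dy/dx = y with y(0) = 1 and step 0.1, the Euler predictor gives
# 1 + 0.1 * 1 = 1.1 and the trapezoidal corrector gives
# 1 + 0.05 * (1 + 1.1) = 1.105, close to the exact e**0.1 ~= 1.10517.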
if __name__ == "__main__":
import doctest
doctest.testmod()
| 16
| 0
|
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation :
    def __init__( self : str , text : str = None , conversation_id : uuid.UUID = None , past_user_inputs : Any = None , generated_responses : List[Any] = None ):
        """simple docstring"""
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text
    def __eq__( self : Union[str, Any] , other : Union[str, Any] ):
        """simple docstring"""
        if not isinstance(other , Conversation ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
    def add_user_input( self : List[Any] , text : str , overwrite : bool = False ):
        """simple docstring"""
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
                    f"""with: \"{text}\".""" )
                self.new_user_input = text
            else:
                logger.warning(
                    f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
                    f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
        else:
            self.new_user_input = text
    def mark_processed( self : Tuple ):
        """simple docstring"""
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input )
        self.new_user_input = None
    def append_response( self : str , response : str ):
        """simple docstring"""
        self.generated_responses.append(response )
    def iter_texts( self : str ):
"""simple docstring"""
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = f"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
SCREAMING_SNAKE_CASE__ = """user""" if is_user else """bot"""
output += f"""{name} >> {text} \n"""
return output
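# Usage sketch for the conversation container above (exported upstream as
# transformers.Conversation):
#
#   conv = Conversation("Hi there!")
#   conv.mark_processed(); conv.append_response("Hello!")
#   for is_user, text in conv.iter_texts():
#       print("user" if is_user else "bot", ">>", text)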
@add_end_docstrings(
__lowerCAmelCase , r"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , )
class ConversationalPipeline ( Pipeline ):
    def __init__( self : Tuple , *args : str , **kwargs : Dict ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token
def __a ( self : int , _lowercase : Union[str, Any]=None , _lowercase : Optional[Any]=None , _lowercase : Union[str, Any]=None , **_lowercase : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = {}
if min_length_for_response is not None:
SCREAMING_SNAKE_CASE__ = min_length_for_response
if minimum_tokens is not None:
SCREAMING_SNAKE_CASE__ = minimum_tokens
if "max_length" in generate_kwargs:
SCREAMING_SNAKE_CASE__ = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
SCREAMING_SNAKE_CASE__ = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(_UpperCamelCase )
return preprocess_params, forward_params, postprocess_params
def __call__( self : Any , _lowercase : Union[Conversation, List[Conversation]] , _lowercase : Optional[Any]=0 , **_lowercase : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = super().__call__(_UpperCamelCase , num_workers=_UpperCamelCase , **_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) == 1:
return outputs[0]
return outputs
def __a ( self : Tuple , _lowercase : Conversation , _lowercase : Tuple=32 ):
"""simple docstring"""
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer , """_build_conversation_input_ids""" ):
SCREAMING_SNAKE_CASE__ = self.tokenizer._build_conversation_input_ids(_UpperCamelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
SCREAMING_SNAKE_CASE__ = self._legacy_parse_and_tokenize(_UpperCamelCase )
if self.framework == "pt":
SCREAMING_SNAKE_CASE__ = torch.LongTensor([input_ids] )
elif self.framework == "tf":
SCREAMING_SNAKE_CASE__ = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def __a ( self : Dict , _lowercase : Union[str, Any] , _lowercase : Tuple=10 , **_lowercase : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = generate_kwargs.get("""max_length""" , self.model.config.max_length )
SCREAMING_SNAKE_CASE__ = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
SCREAMING_SNAKE_CASE__ = max_length - minimum_tokens
SCREAMING_SNAKE_CASE__ = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
SCREAMING_SNAKE_CASE__ = model_inputs["""attention_mask"""][:, -trim:]
SCREAMING_SNAKE_CASE__ = model_inputs.pop("""conversation""" )
SCREAMING_SNAKE_CASE__ = max_length
SCREAMING_SNAKE_CASE__ = self.model.generate(**_UpperCamelCase , **_UpperCamelCase )
if self.model.config.is_encoder_decoder:
SCREAMING_SNAKE_CASE__ = 1
else:
SCREAMING_SNAKE_CASE__ = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def __a ( self : Union[str, Any] , _lowercase : List[str] , _lowercase : str=True ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = model_outputs["""output_ids"""]
SCREAMING_SNAKE_CASE__ = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase , )
SCREAMING_SNAKE_CASE__ = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(_UpperCamelCase )
return conversation
def __a ( self : Optional[int] , _lowercase : Conversation ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.tokenizer.eos_token_id
SCREAMING_SNAKE_CASE__ = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase ) )
if len(_UpperCamelCase ) > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE__ = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 715
|
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('''KEY''')
VAL = TypeVar('''VAL''')
@dataclass(frozen=True , slots=True )
class _Item ( Generic[KEY, VAL] ):
    key: KEY
    val: VAL
class _DeletedItem ( _Item ):
    def __init__( self : List[Any] ):
        """simple docstring"""
        super().__init__(None , None )
    def __bool__( self : List[str] ):
        """simple docstring"""
        return False
_deleted = _DeletedItem()
class HashMap ( MutableMapping[KEY, VAL] ):
    def __init__( self : Optional[int] , initial_block_size : int = 8 , capacity_factor : float = 0.75 ):
        """simple docstring"""
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0
    def _get_bucket_index( self : Union[str, Any] , key : KEY ):
        """simple docstring"""
        return hash(key ) % len(self._buckets )
    def _get_next_ind( self : List[str] , ind : int ):
        """simple docstring"""
        return (ind + 1) % len(self._buckets )
    def _try_set( self : Union[str, Any] , ind : int , key : KEY , val : VAL ):
        """simple docstring"""
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key , val )
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key , val )
            return True
        else:
            return False
    def _is_full( self : List[Any] ):
        """simple docstring"""
        limit = len(self._buckets ) * self._capacity_factor
        return len(self ) >= int(limit )
    def _is_sparse( self : Dict ):
        """simple docstring"""
        if len(self._buckets ) <= self._initial_block_size:
            return False
        limit = len(self._buckets ) * self._capacity_factor / 2
        return len(self ) < limit
    def _resize( self : List[Any] , new_size : int ):
        """simple docstring"""
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key , item.val )
    def _size_up( self : Tuple ):
        """simple docstring"""
        self._resize(len(self._buckets ) * 2 )
    def _size_down( self : Tuple ):
        """simple docstring"""
        self._resize(len(self._buckets ) // 2 )
    def _iterate_buckets( self : List[str] , key : KEY ):
        """simple docstring"""
        ind = self._get_bucket_index(key )
        for _ in range(len(self._buckets ) ):
            yield ind
            ind = self._get_next_ind(ind )
    def _add_item( self : List[str] , key : KEY , val : VAL ):
        """simple docstring"""
        for ind in self._iterate_buckets(key ):
            if self._try_set(ind , key , val ):
                break
    def __setitem__( self : List[str] , key : KEY , val : VAL ):
        """simple docstring"""
        if self._is_full():
            self._size_up()
        self._add_item(key , val )
    def __delitem__( self : Tuple , key : KEY ):
        """simple docstring"""
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key )
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()
    def __getitem__( self : Tuple , key : KEY ):
        """simple docstring"""
        for ind in self._iterate_buckets(key ):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key )
    def __len__( self : List[Any] ):
        """simple docstring"""
        return self._len
    def __iter__( self : List[str] ):
        """simple docstring"""
        yield from (item.key for item in self._buckets if item)
    def __repr__( self : Optional[Any] ):
        """simple docstring"""
        val_string = """ ,""".join(
            f"""{item.key}: {item.val}""" for item in self._buckets if item )
        return f"""HashMap({val_string})"""
| 379
| 0
|
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester :
'''simple docstring'''
def __init__( self : List[Any] , lowercase : str , lowercase : Union[str, Any]=13 , lowercase : Any=30 , lowercase : int=2 , lowercase : Dict=3 , lowercase : List[str]=True , lowercase : Optional[Any]=True , lowercase : Union[str, Any]=32 , lowercase : Tuple=2 , lowercase : List[Any]=4 , lowercase : Dict=37 , lowercase : Tuple="gelu" , lowercase : Any=0.1 , lowercase : List[str]=0.1 , lowercase : Tuple=10 , lowercase : int=0.02 , lowercase : Tuple=3 , lowercase : List[Any]=None , ):
'''simple docstring'''
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_snake_case = (image_size // patch_size) ** 2
_snake_case = num_patches + 1
def A ( self : Optional[int] ):
'''simple docstring'''
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = self.get_config()
return config, pixel_values, labels
def A ( self : Dict ):
'''simple docstring'''
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , )
def A ( self : Optional[Any] , lowercase : Any , lowercase : Optional[Any] , lowercase : Tuple ):
'''simple docstring'''
_snake_case = TFViTModel(config=lowercase )
_snake_case = model(lowercase , training=lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image of a different size than the one specified in the config.
_snake_case = self.image_size // 2
_snake_case = pixel_values[:, :, :image_size, :image_size]
_snake_case = model(lowercase , interpolate_pos_encoding=lowercase , training=lowercase )
_snake_case = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def A ( self : Union[str, Any] , lowercase : int , lowercase : str , lowercase : Dict ):
'''simple docstring'''
_snake_case = self.type_sequence_label_size
_snake_case = TFViTForImageClassification(lowercase )
_snake_case = model(lowercase , labels=lowercase , training=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image of a different size than the one specified in the config.
_snake_case = self.image_size // 2
_snake_case = pixel_values[:, :, :image_size, :image_size]
_snake_case = model(lowercase , interpolate_pos_encoding=lowercase , training=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_snake_case = 1
_snake_case = TFViTForImageClassification(lowercase )
_snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_snake_case = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class TFViTModelTest ( TFModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
_UpperCAmelCase : Dict = (
{"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
if is_tf_available()
else {}
)
_UpperCAmelCase : str = False
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Dict = False
def A ( self : Optional[Any] ):
'''simple docstring'''
_snake_case = TFViTModelTester(self )
_snake_case = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )
def A ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def A ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='ViT does not use inputs_embeds' )
def A ( self : List[str] ):
'''simple docstring'''
pass
def A ( self : Tuple ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowercase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , tf.keras.layers.Layer ) )
def A ( self : List[Any] ):
'''simple docstring'''
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(lowercase )
_snake_case = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase )
def A ( self : Dict ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def A ( self : Tuple ):
'''simple docstring'''
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def A ( self : Dict ):
'''simple docstring'''
_snake_case = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
self.assertIsNotNone(lowercase )
def a_ ( ) -> List[str]:
_snake_case = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A ( self : Optional[int] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def A ( self : Union[str, Any] ):
'''simple docstring'''
_snake_case = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=lowercase , return_tensors='tf' )
# forward pass
_snake_case = model(**lowercase )
# verify the logits
_snake_case = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , lowercase )
_snake_case = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , lowercase , atol=1E-4 )
| 686
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
'''configuration_ernie''': ['''ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ErnieConfig''', '''ErnieOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_ernie'''] = [
'''ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ErnieForCausalLM''',
'''ErnieForMaskedLM''',
'''ErnieForMultipleChoice''',
'''ErnieForNextSentencePrediction''',
'''ErnieForPreTraining''',
'''ErnieForQuestionAnswering''',
'''ErnieForSequenceClassification''',
'''ErnieForTokenClassification''',
'''ErnieModel''',
'''ErniePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 686
| 1
|
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('.')
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )
    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)
    return test_module_path


def get_test_module(test_file):
    """Get the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)
    return test_module


def get_tester_classes(test_file):
    """Get all model tester classes (classes whose names end with `ModelTester`) in a model test file."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all test classes in a model test file, i.e. classes with a non-empty `all_model_classes` attribute."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)
    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in the `all_model_classes` attributes in a model test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)
    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()
    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__
    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)
    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)
    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated to `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)
    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)
    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Map each test class in `test_file` to its model tester class."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Map each model class in `test_file` to the test classes that exercise it."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Map each model class in `test_file` to its model tester classes."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make the mappings above JSON-serializable by replacing classes with their names."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
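# Example usage (a hedged sketch; assumes it is run from the repo root against an existing test file):
#
#     mapping = get_model_to_tester_mapping("tests/models/bert/test_modeling_bert.py")
#     print(to_json(mapping))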
| 715
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
| 303
| 0
|
def is_balanced(s) -> bool:
    """Check whether the brackets in `s` are balanced, using a stack."""
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
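# Example behavior (a sketch): each opener must be closed by its matching bracket in LIFO order.
#
#     is_balanced("{[()]}")  # -> True
#     is_balanced("{[)]}")   # -> False  (')' does not match the most recent opener '[')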
| 315
|
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel
if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 315
| 1
|
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1000,
"megajoule": 1000000,
"gigajoule": 1000000000,
"wattsecond": 1.0,
"watthour": 3600,
"kilowatthour": 3600000,
"newtonmeter": 1.0,
"calorie_nutr": 4186.8,
"kilocalorie_nutr": 4186800.00,
"electronvolt": 1.602176634E-19,
"britishthermalunit_it": 1055.05585,
"footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
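# Example usage (a sketch; the values follow directly from the conversion table above):
#
#     energy_conversion("joule", "kilojoule", 1000)   # -> 1.0
#     energy_conversion("kilowatthour", "joule", 1)   # -> 3600000.0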
| 175
|
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = str(tmp_path_factory.mktemp("data") / f"test.json.{extension}")
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
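# A minimal round-trip sketch of the reader/writer API exercised above (the in-memory
# dataset and buffer here are illustrative, not part of the original test module):
#
#     ds = Dataset.from_dict({"col_1": ["a", "b", "c", "d"], "col_2": [1, 2, 3, 4]})
#     buf = io.BytesIO()
#     JsonDatasetWriter(ds, buf, lines=True).write()
#     buf.seek(0)
#     assert [json.loads(line) for line in buf][0] == {"col_1": "a", "col_2": 1}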
| 175
| 1
|
'''simple docstring'''
import unittest
from knapsack import knapsack as k
class TestKnapsack(unittest.TestCase):
    """Test cases for the knapsack implementation in `knapsack.knapsack`."""

    def test_base_case(self):
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
if __name__ == "__main__":
unittest.main()
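# Sanity check for the last case above: with capacity 50, taking the items with
# (weight=20, value=100) and (weight=30, value=120) fills the knapsack exactly and
# yields the expected optimum of 220.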
| 603
|
'''simple docstring'''
__all__ = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 603
| 1
|
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[f"{prefix}.{param_name}"] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
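# Example invocation (a sketch; the script filename and paths are illustrative):
#
#     python extract.py --model_type roberta --model_name roberta-large \
#         --dump_checkpoint serialization_dir/tf_roberta_048131723.pth --vocab_transform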
| 224
|
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50


@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict


@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 224
| 1
|
'''simple docstring'''
import re
def dna(dna: str) -> str:
    """Return the complementary DNA strand (A<->T, C<->G)."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
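# Example usage (a sketch): complementing a strand swaps A<->T and C<->G, e.g.
#
#     dna("GCTA")  # -> "CGAT"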
| 697
|
'''simple docstring'''
import unittest
from transformers import DonutProcessor
__lowerCAmelCase ="naver-clova-ix/donut-base"
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
a_ = DonutProcessor.from_pretrained(UpperCAmelCase__ )
def __SCREAMING_SNAKE_CASE ( self ) -> str:
a_ = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
a_ = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
a_ = self.processor.tokenajson(UpperCAmelCase__ )
self.assertDictEqual(UpperCAmelCase__ , UpperCAmelCase__ )
| 697
| 1
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enter to nn.Sequencial with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # In Mesh-Tensorflow, it is one array, so it is divided
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # Compute same dimension as Mesh-tensorflow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = (
                        vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    )  # Mesh-Tensorflow is a diagonal matrix
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()  # same because it is one dimensional
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # same in embedded
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()  # same in embedded
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()  # same in embedded
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()  # Mesh-Tensorflow is a diagonal matrix
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()  # same because it is one dimensional
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
convert_tf_gptsan_to_pt(args)
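# Example invocation (a sketch; the script filename and paths are illustrative):
#
#     python convert_gptsan_tf_checkpoint_to_pytorch.py --tf_model_dir ./gptsan_tf_checkpoint --output ./gptsan_pytorch.pt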
| 606
|
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
logger = logging.getLogger(__name__)
def snake_case_ ( __snake_case : torch.nn.Module , __snake_case : BnbQuantizationConfig , __snake_case : Union[str, os.PathLike] = None , __snake_case : Optional[Dict[str, Union[int, str, torch.device]]] = None , __snake_case : Optional[List[str]] = None , __snake_case : Optional[Dict[Union[int, str], Union[int, str]]] = None , __snake_case : Optional[Union[str, os.PathLike]] = None , __snake_case : bool = False , ) -> List[str]:
lowerCAmelCase_ = bnb_quantization_config.load_in_abit
lowerCAmelCase_ = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''')
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit quantization, "
            "make sure you have the latest version of `bitsandbytes` installed."
        )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
        # add cpu modules to skip modules only for 4-bit modules
        if load_in_4bit:
            bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            "It is not recommended to quantize a loaded model. "
            "The model should be instantiated under the `init_empty_weights` context manager."
        )
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace(".weight", "").replace(".bias", "")
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info(
            f"The model device type is {model_device.type}. However, cuda is needed for quantization. "
            "We move the model to cuda."
        )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"`weights_location` needs to be the folder path containing the weights of the model, "
            f"but we found {weights_location} "
        )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert
            )
        device_map = get_quantized_model_device_map(
            model,
            bnb_quantization_config,
            device_map,
            max_memory=max_memory,
            no_split_module_classes=no_split_module_classes,
        )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values()) for x in ["cpu", "disk"])
        load_checkpoint_in_model(
            model,
            weights_location,
            device_map,
            dtype=bnb_quantization_config.torch_dtype,
            offload_folder=offload_folder,
            offload_state_dict=offload_state_dict,
            keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules,
            offload_8bit_bnb=load_in_8bit and offload,
        )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
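# A rough usage sketch of the pipeline above (this assumes accelerate's public
# `BnbQuantizationConfig` / `load_and_quantize_model` entry points wrap these helpers;
# `MyModel` is a hypothetical placeholder class):
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
#
#   with init_empty_weights():
#       empty_model = MyModel()
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#   model = load_and_quantize_model(
#       empty_model,
#       bnb_quantization_config=bnb_config,
#       weights_location="path/to/checkpoint_dir",
#       device_map="auto",
#   )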
def get_quantized_model_device_map(
    model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None
):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {"": torch.cuda.current_device()}
        else:
            raise RuntimeError("No GPU found. A GPU is needed for quantization.")
        logger.info("The device_map was not initialized. Setting device_map to `{'':torch.cuda.current_device()}`.")
    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                "'sequential'."
            )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            }
        )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            }
        )
        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model,
                low_zero=(device_map == "balanced_low_0"),
                max_memory=max_memory,
                **kwargs,
            )
        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)
    if isinstance(device_map, dict):
        # check that we don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        """
                    )
                else:
                    logger.info(
                        "Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit"
                    )
        del device_map_without_some_modules
    return device_map
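# Shape of a hand-written device_map that passes the validation above (illustrative
# module names): quantized blocks stay on GPU 0, while a module listed in `skip_modules`
# may sit on the CPU (accepted for 8-bit quantization, rejected for 4-bit):
#
#   device_map = {"transformer": 0, "lm_head": "cpu"}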
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name
    )
    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " This can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = ".".join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace the `nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        has_fp16_weights=False,
                        threshold=bnb_quantization_config.llm_int8_threshold,
                    )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features,
                        module.out_features,
                        module.bias is not None,
                        bnb_quantization_config.bnb_4bit_compute_dtype,
                        compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant,
                        quant_type=bnb_quantization_config.bnb_4bit_quant_type,
                    )
                else:
                    raise ValueError("load_in_8bit and load_in_4bit can't be both False")
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name
            )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model, "base_model_prefix"):
        is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" and ".bias" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)
    return filtered_module_names
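# For a typical causal LM whose output head is tied to the input embeddings, the helper
# above generally resolves to the head module name, e.g. ["lm_head"], which is then kept
# in full precision rather than quantized.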
def has_4bit_bnb_layers(model):
    # Check if the model contains any `bnb.nn.Linear4bit` layers
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False


def get_parameter_device(parameter: nn.Module) -> torch.device:
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        # walk down to the submodule holding the parameter
        if "." in tensor_name:
            splits = tensor_name.split(".")
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f"{module} has no attribute {split}.")
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], "SCB"):
            offload_weight(
                module._parameters[tensor_name].SCB,
                param_name.replace("weight", "SCB"),
                offload_folder,
                index=offload_index,
            )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace("weight", "SCB"), offload_folder, index=offload_index)
    set_module_tensor_to_device(model, param_name, "meta", dtype=new_dtype, value=torch.empty(*param.size()))
| 606
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
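if __name__ == "__main__":
    # A quick self-contained example: a config for 16-frame clips with joint space-time
    # attention ("divided_space_time", "space_only" and "joint_space_time" are, to the best
    # of our knowledge, the supported attention types).
    config = TimesformerConfig(num_frames=16, attention_type="joint_space_time")
    print(config.num_frames, config.attention_type)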
| 175
|
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """Return the sum of an arithmetic series."""
    # formula for the sum of an arithmetic series
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    return total
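# Worked example of the closed form S_n = (n / 2) * (2a + (n - 1) * d):
# with first_term a=1, common_diff d=1 and num_of_terms n=10 the series is 1+2+...+10,
# so S_10 = (10 / 2) * (2 + 9) = 55.
assert sum_of_series(1, 1, 10) == 55.0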
def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 600
| 0
|
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCAmelCase : Any = get_tests_dir("fixtures")
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def _UpperCAmelCase ( self : Tuple):
"""simple docstring"""
lowercase_ = mock.Mock()
lowercase_ = 5_0_0
lowercase_ = {}
lowercase_ = HTTPError
lowercase_ = {}
# Download this model to make sure it's in the cache.
lowercase_ = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""")
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=lowerCAmelCase_) as mock_head:
lowercase_ = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""")
            # This check makes sure the fake head request did get called
mock_head.assert_called()
def _UpperCAmelCase ( self : str):
"""simple docstring"""
lowercase_ = WavaVecaFeatureExtractor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json""")
@is_staging_test
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@classmethod
def _UpperCAmelCase ( cls : str):
"""simple docstring"""
lowercase_ = TOKEN
HfFolder.save_token(lowerCAmelCase_)
@classmethod
def _UpperCAmelCase ( cls : Any):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="""test-feature-extractor""")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-feature-extractor-org""")
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-feature-extractor""")
except HTTPError:
pass
def _UpperCAmelCase ( self : Dict):
"""simple docstring"""
lowercase_ = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase_)
feature_extractor.push_to_hub("""test-feature-extractor""" , use_auth_token=self._token)
lowercase_ = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''')
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase_ , getattr(lowerCAmelCase_ , lowerCAmelCase_))
# Reset repo
delete_repo(token=self._token , repo_id="""test-feature-extractor""")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCAmelCase_ , repo_id="""test-feature-extractor""" , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token)
lowercase_ = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''')
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase_ , getattr(lowerCAmelCase_ , lowerCAmelCase_))
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase_)
feature_extractor.push_to_hub("""valid_org/test-feature-extractor""" , use_auth_token=self._token)
lowercase_ = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor""")
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase_ , getattr(lowerCAmelCase_ , lowerCAmelCase_))
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-feature-extractor""")
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCAmelCase_ , repo_id="""valid_org/test-feature-extractor-org""" , push_to_hub=lowerCAmelCase_ , use_auth_token=self._token)
lowercase_ = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor-org""")
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCAmelCase_ , getattr(lowerCAmelCase_ , lowerCAmelCase_))
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
lowercase_ = CustomFeatureExtractor.from_pretrained(lowerCAmelCase_)
feature_extractor.push_to_hub("""test-dynamic-feature-extractor""" , use_auth_token=self._token)
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor"""} , )
lowercase_ = AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=lowerCAmelCase_)
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , """CustomFeatureExtractor""")
| 706
|
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the dataset into features and target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(test_features), 1)
    return predictions


def main() -> None:
    # Load the California house price dataset
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 100
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def A ( self : Optional[int] )-> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A ( self : Optional[Any] )-> int:
torch.manual_seed(0 )
__UpperCamelCase = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def A ( self : str )-> List[Any]:
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def A ( self : str )-> Optional[Any]:
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
sample_size=(1_28, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
__UpperCamelCase = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_28, 1_28) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def A ( self : Tuple )-> List[str]:
__UpperCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
__UpperCamelCase = DDPMScheduler()
__UpperCamelCase = AudioDiffusionPipeline(vqvae=A_ , unet=self.dummy_unet , mel=A_ , scheduler=A_ )
__UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(42 )
__UpperCamelCase = pipe(generator=A_ , steps=4 )
__UpperCamelCase = output.audios[0]
__UpperCamelCase = output.images[0]
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(42 )
__UpperCamelCase = pipe(generator=A_ , steps=4 , return_dict=A_ )
__UpperCamelCase = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
__UpperCamelCase = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
__UpperCamelCase = np.array([69, 2_55, 2_55, 2_55, 0, 0, 77, 1_81, 12, 1_27] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
__UpperCamelCase = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
__UpperCamelCase = DDIMScheduler()
__UpperCamelCase = self.dummy_vqvae_and_unet
__UpperCamelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=A_ , scheduler=A_ )
__UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
np.random.seed(0 )
__UpperCamelCase = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(42 )
__UpperCamelCase = pipe(raw_audio=A_ , generator=A_ , start_step=5 , steps=10 )
__UpperCamelCase = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
__UpperCamelCase = np.array([1_20, 1_17, 1_10, 1_09, 1_38, 1_67, 1_38, 1_48, 1_32, 1_21] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
__UpperCamelCase = self.dummy_unet_condition
__UpperCamelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=A_ , mel=A_ , scheduler=A_ )
__UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
np.random.seed(0 )
__UpperCamelCase = torch.rand((1, 1, 10) )
__UpperCamelCase = pipe(generator=A_ , encoding=A_ )
__UpperCamelCase = output.images[0]
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
__UpperCamelCase = np.array([1_07, 1_03, 1_20, 1_27, 1_42, 1_22, 1_13, 1_22, 97, 1_11] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def A ( self : str )-> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Optional[int] )-> Union[str, Any]:
__UpperCamelCase = torch_device
__UpperCamelCase = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
__UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(42 )
__UpperCamelCase = pipe(generator=A_ )
__UpperCamelCase = output.audios[0]
__UpperCamelCase = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
__UpperCamelCase = np.array([1_51, 1_67, 1_54, 1_44, 1_22, 1_34, 1_21, 1_05, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 505
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
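# Usage sketch (assuming the `datasets` task-template API, which is deprecated in recent
# releases): align a dataset whose columns are named differently onto the canonical
# text/summary schema.
#
#   task = Summarization(text_column="document", summary_column="highlights")
#   dataset = dataset.prepare_for_task(task)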
| 505
| 1
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json''',
},
'''spm_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model''',
},
'''tokenizer_config_file''': {
'''facebook/m2m100_418M''': '''https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json''',
'''facebook/m2m100_1.2B''': '''https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/m2m100_418M''': 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'''m2m100''': ['''af''', '''am''', '''ar''', '''ast''', '''az''', '''ba''', '''be''', '''bg''', '''bn''', '''br''', '''bs''', '''ca''', '''ceb''', '''cs''', '''cy''', '''da''', '''de''', '''el''', '''en''', '''es''', '''et''', '''fa''', '''ff''', '''fi''', '''fr''', '''fy''', '''ga''', '''gd''', '''gl''', '''gu''', '''ha''', '''he''', '''hi''', '''hr''', '''ht''', '''hu''', '''hy''', '''id''', '''ig''', '''ilo''', '''is''', '''it''', '''ja''', '''jv''', '''ka''', '''kk''', '''km''', '''kn''', '''ko''', '''lb''', '''lg''', '''ln''', '''lo''', '''lt''', '''lv''', '''mg''', '''mk''', '''ml''', '''mn''', '''mr''', '''ms''', '''my''', '''ne''', '''nl''', '''no''', '''ns''', '''oc''', '''or''', '''pa''', '''pl''', '''ps''', '''pt''', '''ro''', '''ru''', '''sd''', '''si''', '''sk''', '''sl''', '''so''', '''sq''', '''sr''', '''ss''', '''su''', '''sv''', '''sw''', '''ta''', '''th''', '''tl''', '''tn''', '''tr''', '''uk''', '''ur''', '''uz''', '''vi''', '''wo''', '''xh''', '''yi''', '''yo''', '''zh''', '''zu'''],
'''wmt21''': ['''en''', '''ha''', '''is''', '''ja''', '''cs''', '''ru''', '''zh''', '''de''']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self,
        token_ids_0: List[int],
        token_ids_1: Optional[List[int]] = None,
        already_has_special_tokens: bool = False,
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str) -> Union[Dict, List]:
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
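# Usage sketch (model weights are not needed for the tokenizer itself; `src_lang`/`tgt_lang`
# select the language tokens that wrap the encoded ids):
#
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   enc = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
#   # enc["input_ids"] starts with the __en__ language token and ends with </s>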
| 226
|
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation
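# One step of the blinker demonstrates the rules above: the vertical bar of three live
# cells flips to a horizontal bar (and back again on the next step, so it has period 2).
assert new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]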
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
| 226
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''],
'''tokenization_xlm''': ['''XLMTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMForMultipleChoice''',
'''XLMForQuestionAnswering''',
'''XLMForQuestionAnsweringSimple''',
'''XLMForSequenceClassification''',
'''XLMForTokenClassification''',
'''XLMModel''',
'''XLMPreTrainedModel''',
'''XLMWithLMHeadModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXLMForMultipleChoice''',
'''TFXLMForQuestionAnsweringSimple''',
'''TFXLMForSequenceClassification''',
'''TFXLMForTokenClassification''',
'''TFXLMMainLayer''',
'''TFXLMModel''',
'''TFXLMPreTrainedModel''',
'''TFXLMWithLMHeadModel''',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 228
|
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""")
| 228
| 1
|
"""simple docstring"""
from __future__ import annotations
import requests
valid_terms = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch posts from a subreddit via the public JSON endpoint."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
| 715
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase__ :
def __init__( self : Dict , _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any]=2 , _lowerCamelCase : Union[str, Any]=True , _lowerCamelCase : Any=False , _lowerCamelCase : Optional[int]=10 , _lowerCamelCase : List[Any]=3 , _lowerCamelCase : str=32 * 8 , _lowerCamelCase : Optional[int]=32 * 8 , _lowerCamelCase : Optional[Any]=4 , _lowerCamelCase : Union[str, Any]=64 , ):
_snake_case = parent
_snake_case = batch_size
_snake_case = is_training
_snake_case = use_auxiliary_loss
_snake_case = num_queries
_snake_case = num_channels
_snake_case = min_size
_snake_case = max_size
_snake_case = num_labels
_snake_case = hidden_dim
_snake_case = hidden_dim
def lowercase ( self : Optional[int] ):
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_lowerCamelCase )
_snake_case = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowerCamelCase )
_snake_case = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowerCamelCase ) > 0.5
).float()
_snake_case = (torch.rand((self.batch_size, self.num_labels) , device=_lowerCamelCase ) > 0.5).long()
_snake_case = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase ( self : Union[str, Any] ):
_snake_case = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_snake_case = self.num_queries
_snake_case = self.num_labels
_snake_case = [1, 1, 1, 1]
_snake_case = self.num_channels
_snake_case = 64
_snake_case = 128
_snake_case = self.hidden_dim
_snake_case = self.hidden_dim
_snake_case = self.hidden_dim
return config
def lowercase ( self : Union[str, Any] ):
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.prepare_config_and_inputs()
_snake_case = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def lowercase ( self : Any , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ):
_snake_case = output.encoder_hidden_states
_snake_case = output.pixel_decoder_hidden_states
_snake_case = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_lowerCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCamelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowerCamelCase ) , config.decoder_layers )
def lowercase ( self : Any , _lowerCamelCase : int , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : Any=False ):
with torch.no_grad():
_snake_case = MaskaFormerModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
_snake_case = model(pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase )
_snake_case = model(_lowerCamelCase , output_hidden_states=_lowerCamelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_lowerCamelCase , _lowerCamelCase )
def lowercase ( self : str , _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : List[Any] ):
_snake_case = MaskaFormerForUniversalSegmentation(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
def comm_check_on_output(_lowerCamelCase : List[str] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_snake_case = model(pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase )
_snake_case = model(_lowerCamelCase )
comm_check_on_output(_lowerCamelCase )
_snake_case = model(
pixel_values=_lowerCamelCase , pixel_mask=_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase )
comm_check_on_output(_lowerCamelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCAmelCase__ ( A_ , A_ , unittest.TestCase ):
__a = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
__a = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
__a = False
__a = False
__a = False
__a = False
def lowercase ( self : Any ):
_snake_case = MaskaFormerModelTester(self )
_snake_case = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def lowercase ( self : Dict ):
self.config_tester.run_common_tests()
def lowercase ( self : List[str] ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCamelCase , **_lowerCamelCase , output_hidden_states=_lowerCamelCase )
def lowercase ( self : int ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_lowerCamelCase )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def lowercase ( self : Union[str, Any] ):
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def lowercase ( self : Optional[Any] ):
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def lowercase ( self : Optional[Any] ):
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def lowercase ( self : Dict ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowercase ( self : Tuple ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase ( self : Union[str, Any] ):
pass
def lowercase ( self : str ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(_lowerCamelCase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
@slow
def lowercase ( self : Dict ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_snake_case = MaskaFormerModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def lowercase ( self : Tuple ):
_snake_case = (self.model_tester.min_size,) * 2
_snake_case = {
'''pixel_values''': torch.randn((2, 3, *size) , device=_lowerCamelCase ),
'''mask_labels''': torch.randn((2, 10, *size) , device=_lowerCamelCase ),
'''class_labels''': torch.zeros(2 , 10 , device=_lowerCamelCase ).long(),
}
_snake_case = self.model_tester.get_config()
_snake_case = MaskaFormerForUniversalSegmentation(_lowerCamelCase ).to(_lowerCamelCase )
_snake_case = model(**_lowerCamelCase )
self.assertTrue(outputs.loss is not None )
def lowercase ( self : List[str] ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowerCamelCase , **_lowerCamelCase , output_hidden_states=_lowerCamelCase )
def lowercase ( self : Any ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(_lowerCamelCase ).to(_lowerCamelCase )
_snake_case = model(**_lowerCamelCase , output_attentions=_lowerCamelCase )
self.assertTrue(outputs.attentions is not None )
def lowercase ( self : int ):
if not self.model_tester.is_training:
return
_snake_case = self.all_model_classes[1]
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs()
_snake_case = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.train()
_snake_case = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase ).loss
loss.backward()
def lowercase ( self : Dict ):
_snake_case = self.all_model_classes[1]
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs()
_snake_case = True
_snake_case = True
_snake_case = model_class(_lowerCamelCase ).to(_lowerCamelCase )
model.train()
_snake_case = model(_lowerCamelCase , mask_labels=_lowerCamelCase , class_labels=_lowerCamelCase )
_snake_case = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_snake_case = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_snake_case = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_snake_case = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowerCamelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
UpperCAmelCase__ = 1e-4
def _UpperCAmelCase ( ) -> int:
_snake_case = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowercase ( self : Tuple ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowercase ( self : Optional[Any] ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowercase ( self : Any ):
_snake_case = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
_snake_case = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCamelCase , (1, 3, 384, 384) )
with torch.no_grad():
_snake_case = model(**_lowerCamelCase )
_snake_case = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
_snake_case = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
_snake_case = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(_lowerCamelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
def lowercase ( self : List[Any] ):
_snake_case = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase ).eval()
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(_lowerCamelCase , return_tensors='''pt''' ).to(_lowerCamelCase )
_snake_case = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowerCamelCase , (1, 3, 384, 384) )
with torch.no_grad():
_snake_case = model(**_lowerCamelCase )
# masks_queries_logits
_snake_case = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_snake_case = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
_snake_case = torch.tensor(_lowerCamelCase ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
# class_queries_logits
_snake_case = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_snake_case = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowerCamelCase , atol=_lowerCamelCase ) )
def lowercase ( self : List[str] ):
_snake_case = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowerCamelCase ).eval()
_snake_case = self.default_image_processor
_snake_case = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
_snake_case = inputs['''pixel_values'''].to(_lowerCamelCase )
_snake_case = [el.to(_lowerCamelCase ) for el in inputs['''mask_labels''']]
_snake_case = [el.to(_lowerCamelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
_snake_case = model(**_lowerCamelCase )
self.assertTrue(outputs.loss is not None )
| 430
| 0
|
'''simple docstring'''
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Exact Gaussian Error Linear Unit."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf
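# _gelu implements the exact form GELU(x) = x * Phi(x) = 0.5 * x * (1 + erf(x / sqrt(2))),
# where Phi is the standard normal CDF. _gelu_new below is the tanh approximation
# 0.5 * x * (1 + tanh(sqrt(2 / pi) * (x + 0.044715 * x**3))).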
def _gelu_new(x):
    """Gaussian Error Linear Unit (tanh approximation)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044_715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf


def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    """Fast approximation of GELU."""
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044_715, x.dtype)
    coeff2 = tf.cast(0.7_978_845_608, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    """Sigmoid-based approximation of GELU."""
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """GELU clipped to the range [-10, 10]."""
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """Gated Linear Unit: split the input in two along `axis` and gate one half with the other."""
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)
if version.parse(tf.version.VERSION) >= version.parse('''2.4'''):
def a ( lowerCamelCase__ ):
'''simple docstring'''
return tf.keras.activations.gelu(lowerCamelCase__ , approximate=lowerCamelCase__ )
lowerCamelCase :Union[str, Any] = tf.keras.activations.gelu
lowerCamelCase :Optional[Any] = approximate_gelu_wrap
else:
lowerCamelCase :Optional[int] = _gelu
lowerCamelCase :List[str] = _gelu_new
lowerCamelCase :Union[str, Any] = {
'''gelu''': gelu,
'''gelu_10''': gelu_aa,
'''gelu_fast''': gelu_fast,
'''gelu_new''': gelu_new,
'''glu''': glu,
'''mish''': mish,
'''quick_gelu''': quick_gelu,
'''relu''': tf.keras.activations.relu,
'''sigmoid''': tf.keras.activations.sigmoid,
'''silu''': tf.keras.activations.swish,
'''swish''': tf.keras.activations.swish,
'''tanh''': tf.keras.activations.tanh,
}
def a ( lowerCamelCase__ ):
'''simple docstring'''
if activation_string in ACTaFN:
return ACTaFN[activation_string]
else:
raise KeyError(f'function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}' )
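
# Minimal usage sketch (added for illustration): look up an activation by its
# string name and apply it to a tensor.
if __name__ == "__main__":
    sample = tf.constant([-3.0, -1.0, 0.0, 1.0, 3.0])
    for name in ("gelu", "gelu_new", "quick_gelu", "mish"):
        print(name, get_tf_activation(name)(sample).numpy())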
| 667
|
"""Smoke tests for the digital_image_processing package."""
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
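
# Illustrative invocation (the file path is an assumption and presumes the
# repository's layout with pytest installed):
#   python -m pytest digital_image_processing/test_digital_image_processing.py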
| 667
| 1
|
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # For testing, an iterable dataset of random length
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop


class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and has not a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(
        self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False
    ):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset,
                batch_size=batch_size,
                drop_last=drop_last,
                num_processes=num_processes,
                process_index=i,
                split_batches=split_batches,
            )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
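
# Illustration (a sketch, assuming two processes): BatchSamplerShard deals
# whole batches out round-robin, so each shard sees every other batch.
if __name__ == "__main__":
    sampler = BatchSampler(range(12), batch_size=2, drop_last=False)
    shards = [BatchSamplerShard(sampler, 2, i) for i in range(2)]
    for i, shard in enumerate(shards):
        print(f"shard {i}:", list(shard))
    # shard 0: [[0, 1], [4, 5], [8, 9]]
    # shard 1: [[2, 3], [6, 7], [10, 11]]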
| 302
|
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)

def feature_extractor_class_from_name(class_name):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None

def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
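
# Usage sketch (illustrative; the checkpoint name is an example and resolving
# it requires network access or a local cache):
#   config = get_feature_extractor_config("facebook/wav2vec2-base-960h")
#   print(config.get("feature_extractor_type"))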

class AutoFeatureExtractor:
    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type`
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
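
# Usage sketch (illustrative; downloads the checkpoint on first use):
#   feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   print(type(feature_extractor).__name__)  # e.g. "Wav2Vec2FeatureExtractor"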
| 302
| 1
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += f"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 37
|
def binomial_coefficient(n: int, r: int) -> int:
    """Compute C(n, r) via Pascal's rule, using O(r) extra space."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
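# Prints 252: there are C(10, 5) = 10! / (5! * 5!) = 252 five-element subsets
# of a ten-element set.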
| 37
| 1
|
"""OWL-ViT model configuration."""
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}


class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config, vision_config, **kwargs):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
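
# Usage sketch (illustrative): compose a full OwlViT config from the two
# sub-configs with their default values.
if __name__ == "__main__":
    text_config = OwlViTTextConfig()
    vision_config = OwlViTVisionConfig()
    config = OwlViTConfig.from_text_vision_configs(text_config.to_dict(), vision_config.to_dict())
    print(config.model_type, config.projection_dim)  # owlvit 512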
| 719
|
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
UpperCamelCase_ = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 88
| 0
|
"""Evaluate a space-separated postfix expression, printing each stack action."""
import operator as op


def solve(post_fix):
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operation

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")

            a = stack.pop()  # pop stack
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")

            stack.append(
                str(opr[x](int(a), int(b)))
            )  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ",
            )

    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
| 26
|
"""Primality test plus a helper that finds the next prime at or after a value."""
import math


def is_prime(number: int) -> bool:
    """Return True if `number` is prime, else False."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
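
# Quick self-checks (a sketch):
if __name__ == "__main__":
    assert is_prime(13) and not is_prime(14)
    assert next_prime(14) == 17  # counts upwards from 14
    assert next_prime(14, desc=True) == 13  # counts downwards instead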
| 533
| 0
|
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] from the input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the frequency response of a filter via the FFT of its impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Plot the unwrapped phase of the FFT of a filter's impulse response."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_phase = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_phase, -2 * pi))
    plt.show()
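
# Usage sketch (illustrative): any object with a `process(sample) -> float`
# method satisfies FilterType; an identity filter has a flat 0 dB response.
if __name__ == "__main__":
    class IdentityFilter:
        def process(self, sample: float) -> float:
            return sample

    show_frequency_response(IdentityFilter(), 48000)
    show_phase_response(IdentityFilter(), 48000)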
| 639
|
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
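
# Example invocation (script name and paths are placeholders):
#   python convert_t5_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/t5/model.ckpt \
#       --config_file /path/to/t5/config.json \
#       --pytorch_dump_path /path/to/output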
| 639
| 1
|
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'''bart''': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''bert''': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''bert-base-cased-finetuned-mrpc''': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''dpr''': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''gpt2''': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlnet''': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm''': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''xlm-roberta''': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''transfo-xl''': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''openai-gpt''': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''roberta''': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''layoutlm''': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'''roberta-large-mnli''': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''camembert''': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''flaubert''': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert''': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''distilbert-base-distilled-squad''': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert''': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''lxmert-visual-feature-encoder''': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''ctrl''': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''albert''': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''t5''': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''electra''': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'''wav2vec2''': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    """Convert all (or one type of) pretrained PyTorch checkpoints to TF 2.0 checkpoints."""
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        # NOTE: this batch path expects 5-tuples carrying both a model archive map and a
        # config archive map; 4-item entries above only support the single-checkpoint path.
        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(model_file)
                os.remove(config_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_dump_path''', default=None, type=str, required=True, help='''Path to the output Tensorflow dump file.'''
)
parser.add_argument(
'''--model_type''',
default=None,
type=str,
help=(
F"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'''convert all the models from AWS.'''
),
)
parser.add_argument(
'''--pytorch_checkpoint_path''',
default=None,
type=str,
help=(
'''Path to the PyTorch checkpoint path or shortcut name to download from AWS. '''
'''If not given, will download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
help=(
'''The config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture. If not given and '''
'''--pytorch_checkpoint_path is not given or is a shortcut name '''
'''use the configuration associated to the shortcut name on the AWS'''
),
)
parser.add_argument(
'''--compare_with_pt_model''', action='''store_true''', help='''Compare Tensorflow and PyTorch model predictions.'''
)
parser.add_argument(
'''--use_cached_models''',
action='''store_true''',
help='''Use cached models if possible instead of updating to latest checkpoint versions.''',
)
parser.add_argument(
'''--remove_cached_files''',
action='''store_true''',
help='''Remove pytorch models after conversion (save memory when converting in batches).''',
)
parser.add_argument('''--only_convert_finetuned_models''', action='''store_true''', help='''Only convert finetuned models.''')
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
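# Example invocation (hypothetical script name and paths):
#   python convert_pytorch_checkpoint_to_tf2.py --model_type bert \
#       --pytorch_checkpoint_path ./pytorch_model.bin \
#       --config_file ./config.json --tf_dump_path ./tf_dump --compare_with_pt_model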
| 105
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        # time
        self.time_proj = FlaxTimesteps(block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]

        controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype)
            else:
                down_block = FlaxDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype)
            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype)

        self.controlnet_mid_block = nn.Conv(mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale=1.0, return_dict=True, train=False) -> Union[FlaxControlNetOutput, Tuple]:
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample)
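# Illustrative init sketch (assumed usage, mirroring init_weights above): build the
# module, then initialise parameters from a PRNG key.
#   controlnet = FlaxControlNetModel(sample_size=32)
#   params = controlnet.init_weights(jax.random.PRNGKey(0))
# The conditioning image is expected at 8x the latent resolution, i.e.
# (1, 3, sample_size * 8, sample_size * 8), matching the VAE downsampling factor.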
| 318
| 0
|
'''simple docstring'''
def kinetic_energy(mass: float, velocity: float) -> float:
    """Kinetic energy 0.5 * m * |v|**2 in joules (SI units).

    >>> kinetic_energy(10, 10)
    500.0
    """
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 47
|
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    " is :"
)
print(z)
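# Small self-check (illustrative, not part of the original script): sort a short
# list in place with the same routine and verify the result is ordered.
_demo = [3.1, -0.4, 2.2, 0.0, 1.5]
_expected = sorted(_demo)
_in_place_quick_sort(_demo, 0, len(_demo) - 1)
assert _demo == _expected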
| 47
| 1
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copy/paste/tweak the fairseq UniSpeech weights into the transformers design."""
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False)
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask)
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
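# Example invocation (hypothetical script name and paths):
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path ./unispeech.pt --pytorch_dump_folder_path ./unispeech-hf \
#       --config_path ./config.json --dict_path ./dict.ltr.txt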
| 106
|
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """
    Greedy fractional knapsack: item values `vl`, weights `wt`, capacity `w`, `n` items.

    >>> frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
    240.0
    """
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 106
| 1
|
deps = {
"""Pillow""": """Pillow""",
"""accelerate""": """accelerate>=0.11.0""",
"""compel""": """compel==0.1.8""",
"""black""": """black~=23.1""",
"""datasets""": """datasets""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.13.2""",
"""requests-mock""": """requests-mock==1.10.0""",
"""importlib_metadata""": """importlib_metadata""",
"""invisible-watermark""": """invisible-watermark""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2""",
"""jaxlib""": """jaxlib>=0.1.65""",
"""Jinja2""": """Jinja2""",
"""k-diffusion""": """k-diffusion>=0.0.12""",
"""torchsde""": """torchsde""",
"""note_seq""": """note_seq""",
"""librosa""": """librosa""",
"""numpy""": """numpy""",
"""omegaconf""": """omegaconf""",
"""parameterized""": """parameterized""",
"""protobuf""": """protobuf>=3.20.3,<4""",
"""pytest""": """pytest""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""ruff""": """ruff>=0.0.241""",
"""safetensors""": """safetensors""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""scipy""": """scipy""",
"""onnx""": """onnx""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""tensorboard""": """tensorboard""",
"""torch""": """torch>=1.4""",
"""torchvision""": """torchvision""",
"""transformers""": """transformers>=4.25.1""",
"""urllib3""": """urllib3<=2.0.0""",
}
| 704
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt""",
},
"""tokenizer_file""": {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-openqa""": (
"""https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-nq-reader""": (
"""https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-openqa""": (
"""https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json"""
),
"""google/realm-orqa-wq-reader""": (
"""https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/realm-cc-news-pretrained-embedder""": 512,
"""google/realm-cc-news-pretrained-encoder""": 512,
"""google/realm-cc-news-pretrained-scorer""": 512,
"""google/realm-cc-news-pretrained-openqa""": 512,
"""google/realm-orqa-nq-openqa""": 512,
"""google/realm-orqa-nq-reader""": 512,
"""google/realm-orqa-wq-openqa""": 512,
"""google/realm-orqa-wq-reader""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/realm-cc-news-pretrained-embedder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-encoder""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-scorer""": {"""do_lower_case""": True},
"""google/realm-cc-news-pretrained-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-nq-reader""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-openqa""": {"""do_lower_case""": True},
"""google/realm-orqa-wq-reader""": {"""do_lower_case""": True},
}
class RealmTokenizerFast(PreTrainedTokenizerFast):
    r"""Fast REALM tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RealmTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def batch_encode_candidates(self, text, **kwargs):
        # Always use a fixed padding strategy so every candidate is encoded to the same length
        kwargs["padding"] = PaddingStrategy.MAX_LENGTH
        batch_text = text
        batch_text_pair = kwargs.pop("text_pair", None)
        return_tensors = kwargs.pop("return_tensors", None)

        output_data = {
            "input_ids": [],
            "attention_mask": [],
            "token_type_ids": [],
        }

        for idx, candidate_text in enumerate(batch_text):
            if batch_text_pair is not None:
                candidate_text_pair = batch_text_pair[idx]
            else:
                candidate_text_pair = None

            encoded_candidates = super().__call__(candidate_text, candidate_text_pair, return_tensors=None, **kwargs)

            encoded_input_ids = encoded_candidates.get("input_ids")
            encoded_attention_mask = encoded_candidates.get("attention_mask")
            encoded_token_type_ids = encoded_candidates.get("token_type_ids")

            if encoded_input_ids is not None:
                output_data["input_ids"].append(encoded_input_ids)
            if encoded_attention_mask is not None:
                output_data["attention_mask"].append(encoded_attention_mask)
            if encoded_token_type_ids is not None:
                output_data["token_type_ids"].append(encoded_token_type_ids)

        output_data = {key: item for key, item in output_data.items() if len(item) != 0}

        return BatchEncoding(output_data, tensor_type=return_tensors)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
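# Hypothetical usage sketch of batch_encode_candidates: each example carries a list of
# candidate strings, and every candidate is padded to the same max_length.
#   tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
#   batch = tokenizer.batch_encode_candidates(
#       [["candidate one", "candidate two"]], max_length=10, return_tensors="np"
#   )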
| 669
| 0
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training program/script to be launched in parallel, followed by all the"
            " arguments for the training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()
def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
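# Example invocation (hypothetical script and flags):
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
# The launched script must expose a `_mp_fn(index)` entry point for xmp.spawn.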
| 176
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
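# With the _LazyModule pattern above, names resolve on first attribute access: importing
# Pix2StructProcessor from this package only loads processing_pix2struct when the symbol
# is actually touched (illustrative description of the mechanism, not extra behavior).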
| 176
| 1
|
'''simple docstring'''
from math import factorial
def solution(n: int = 20) -> int:
    """Lattice paths through an n x n grid: the central binomial coefficient C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
__UpperCAmelCase = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
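# solution(20) == 137846528820: the number of monotone lattice paths through a 20x20
# grid, i.e. the central binomial coefficient C(40, 20) (Project Euler #15).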
| 220
|
'''simple docstring'''
def is_arithmetic_series(series: list) -> bool:
    """
    Check whether a list forms an arithmetic series.

    >>> is_arithmetic_series([2, 4, 6])
    True
    >>> is_arithmetic_series([3, 6, 12, 24])
    False
    """
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    """
    Return the arithmetic mean of a list of numbers.

    >>> arithmetic_mean([2, 4, 6])
    4.0
    """
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 220
| 1
|
def naive_pattern_search(s: str, pattern: str) -> list:
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 121
|
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
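# Non-interactive example (hypothetical key): build a map once and round-trip a message.
#   cipher_map = create_cipher_map("Goodbye!!")
#   secret = encipher("Hello World!!", cipher_map)
#   assert decipher(secret, cipher_map) == "HELLO WORLD!!"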
| 121
| 1
|
"""simple docstring"""
import os
def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j]
                    * grid[i + 1][j + 1]
                    * grid[i + 2][j + 2]
                    * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j]
                    * grid[i - 1][j + 1]
                    * grid[i - 2][j + 2]
                    * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]

    return largest_product(grid)
if __name__ == "__main__":
print(solution())
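# For the 20x20 grid distributed with this problem (grid.txt), the expected result is
# 70600674 (Project Euler #11).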
| 713
|
"""simple docstring"""
def solution(pence: int = 200) -> int:
    """Count the ways to make `pence` using British coins (1, 2, 5, 10, 20, 50, 100, 200)."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
| 31
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampling from Karras et al., tailored to variance-expanding models."""

    order = 2

    @register_to_config
    def __init__(self, sigma_min=0.02, sigma_max=100, s_noise=1.007, s_churn=80, s_min=0.05, s_max=50):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input(self, sample, timestep=None):
        return sample

    def set_timesteps(self, num_inference_steps, device=None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(self, sample, sigma, generator=None):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)

    def step_correct(self, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
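# Hypothetical usage sketch for one stochastic Karras-VE step (names assumed):
#   scheduler = KarrasVeScheduler()
#   scheduler.set_timesteps(50)
#   sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, scheduler.schedule[0])
#   out = scheduler.step(model_output, sigma_hat, scheduler.schedule[1], sample_hat)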
| 500
|
'''simple docstring'''
import math
def proth(number: int) -> int:
    """Return the `number`-th Proth number (k * 2**n + 1 with odd k < 2**n)."""
    if not isinstance(number, int):
        message = f"Input value of [number={number}] must be an integer"
        raise TypeError(message)

    if number < 1:
        message = f"Input value of [number={number}] must be > 0"
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
| 451
| 0
|
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
TEXT = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"


class TextQuestionAnsweringToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-question-answering")
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering", remote=True)

    def test_exact_match_arg(self):
        result = self.tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool(TEXT, "What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg(self):
        result = self.tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text=TEXT, question="What did Hugging Face do in April 2021?")
        self.assertEqual(result, "launched the BigScience Research Workshop")
| 714
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size_divisor: int = 32, resample=PILImageResampling.BILINEAR, do_rescale: bool = True, **kwargs) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image: np.ndarray, size_divisor: int, resample, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(self, images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]], do_resize: Optional[bool] = None, size_divisor: Optional[int] = None, resample=None, do_rescale: Optional[bool] = None, return_tensors: Optional[Union[TensorType, str]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 461
| 0
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, num_proc=None, **kwargs):
        super().__init__(path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(cache_dir=cache_dir, data_files=path_or_paths, features=features, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc)
            dataset = self.builder.as_dataset(split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
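# Hypothetical usage: read one text file into a map-style dataset.
#   ds = TextDatasetReader("data.txt", split=NamedSplit("train")).read()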
| 66
|
'''simple docstring'''
class EditDistance:
    """Minimum edit distance via top-down (memoized) and bottom-up DP."""

    def __init__(self) -> None:
        self.worda = ""
        self.wordb = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.worda[m] == self.wordb[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
        return self.dp[m][n]

    def min_dist_top_down(self, worda: str, wordb: str) -> int:
        self.worda = worda
        self.wordb = wordb
        self.dp = [[-1 for _ in range(len(wordb))] for _ in range(len(worda))]
        return self.__min_dist_top_down_dp(len(worda) - 1, len(wordb) - 1)

    def min_dist_bottom_up(self, worda: str, wordb: str) -> int:
        self.worda = worda
        self.wordb = wordb
        m = len(worda)
        n = len(wordb)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif worda[i - 1] == wordb[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()
    print('****************** Testing Edit Distance DP Algorithm ******************')
    print()
    S1 = input('Enter the first string: ').strip()
    S2 = input('Enter the second string: ').strip()
    print()
    print(f"""The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}""")
    print(f"""The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}""")
    print()
    print('*************** End of Testing Edit Distance DP Algorithm ***************')
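# Quick sanity checks for the DP above (textbook answers): "kitten" -> "sitting"
# takes 3 edits, "intention" -> "execution" takes 5. Assumes the EditDistance
# class defined above is in scope.
checker = EditDistance()
assert checker.min_dist_bottom_up("kitten", "sitting") == 3
assert checker.min_dist_top_down("intention", "execution") == 5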
| 404
| 0
|
def factorial(digit: int) -> int:
    """Return digit! (with 0! == 1! == 1)."""
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))


def krishnamurthy(number: int) -> bool:
    """A Krishnamurthy number equals the sum of the factorials of its digits."""
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number


if __name__ == "__main__":
    print('Program to check whether a number is a Krishnamurthy Number or not.')
    number = int(input('Enter number: ').strip())
    print(
        f"""{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."""
    )
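# Worked check: 145 is the canonical Krishnamurthy number, since
# 1! + 4! + 5! = 1 + 24 + 120 = 145; 40585 is the only other five-digit one.
assert krishnamurthy(145)
assert krishnamurthy(40585)
assert not krishnamurthy(100)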
| 166
|
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 166
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""YituTech/conv-bert-base""": 512,
"""YituTech/conv-bert-medium-small""": 512,
"""YituTech/conv-bert-small""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" ConvBERT tokenizer (backed by the *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs) -> None:
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
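# Illustrative usage (ours, not from the original file); downloads the
# YituTech/conv-bert-base vocab listed in PRETRAINED_VOCAB_FILES_MAP above.
tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
enc = tok("hello world", "second segment")
print(enc["input_ids"])       # [CLS] seq A [SEP] seq B [SEP]
print(enc["token_type_ids"])  # 0s over [CLS] seq A [SEP], then 1s over seq B [SEP]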
| 13
|
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # quiet TensorFlow's C++ logging if TF gets pulled in below
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 13
| 1
|
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '''\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
'''
_DESCRIPTION = '''\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
'''
_KWARGS_DESCRIPTION = '''
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
"accuracy": Accuracy
"f1": F1 score
"pearson": Pearson Correlation
"spearmanr": Spearman Correlation
"matthews_correlation": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})
{\'pearson\': 1.0, \'spearmanr\': 1.0}
>>> glue_metric = datasets.load_metric(\'glue\', \'cola\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
'"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32' ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]' )
| 699
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class PoolFormerConfig(PretrainedConfig):
    model_type = "poolformer"

    def __init__(self, num_channels=3, patch_size=16, stride=16, pool_size=3, mlp_ratio=4.0, depths=[2, 2, 6, 2], hidden_sizes=[64, 128, 320, 512], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], padding=[2, 1, 1, 1], num_encoder_blocks=4, drop_path_rate=0.0, hidden_act="gelu", use_layer_scale=True, layer_scale_init_value=1e-5, initializer_range=0.02, **kwargs) -> None:
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs)


class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 2e-3
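# Quick instantiation check for the config classes above:
config = PoolFormerConfig(hidden_sizes=[32, 64, 160, 256])
print(config.model_type)    # "poolformer"
print(config.hidden_sizes)  # [32, 64, 160, 256]
print(PoolFormerOnnxConfig.torch_onnx_minimum_version)  # 1.11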
| 699
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, '''image_mean'''))
        self.assertTrue(hasattr(image_processor, '''image_std'''))
        self.assertTrue(hasattr(image_processor, '''do_normalize'''))
        self.assertTrue(hasattr(image_processor, '''do_resize'''))
        self.assertTrue(hasattr(image_processor, '''size'''))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ), )

    def test_call_numpy(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ), )

    def test_call_pytorch(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ), )
        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors='''pt''').pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size['''height'''],
                self.image_proc_tester.size['''width'''],
            ), )
| 98
|
def solution(n: int = 100) -> int:
    """Count the distinct terms of a**b for 2 <= a <= n and 2 <= b <= n."""
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit (range upper bound is exclusive)
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)


if __name__ == "__main__":
    print('Number of terms ', solution(int(str(input()).strip())))
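# Worked check from Project Euler problem 29: for 2 <= a, b <= 5 the sequence
# a**b produces exactly 15 distinct terms (4, 8, 16, 32, 9, 27, ..., 3125).
assert solution(5) == 15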
| 348
| 0
|
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(R"^(?P<major>\d+)" R"\.(?P<minor>\d+)" R"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return F"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(F"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from the version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Return the string form of a (major, minor, patch) version tuple."""
    return ".".join(str(v) for v in version_tuple)
| 253
|
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids"""
        pass
| 253
| 1
|
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    """Map a ParlAI state-dict key onto the corresponding HF Blenderbot key."""
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith('''encoder'''):
        k = k.replace('''.attn''', '''.self_attn''')
        k = k.replace('''norm1''', '''self_attn_layer_norm''')
        k = k.replace('''norm2''', '''final_layer_norm''')
    elif k.startswith('''decoder'''):
        k = k.replace('''norm1''', '''self_attn_layer_norm''')
        k = k.replace('''norm2''', '''encoder_attn_layer_norm''')
        k = k.replace('''norm3''', '''final_layer_norm''')
    return k


def rename_layernorm_keys(sd):
    keys = [
        '''model.encoder.layernorm_embedding.weight''',
        '''model.encoder.layernorm_embedding.bias''',
        '''model.decoder.layernorm_embedding.weight''',
        '''model.decoder.layernorm_embedding.bias''',
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace('''layernorm_embedding''', '''layer_norm''')
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["""START"""]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """Load a ParlAI checkpoint and save it in the HF Blenderbot format."""
    model = torch.load(checkpoint_path, map_location='''cpu''')
    sd = model['''model''']
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--src_path""", type=str, help="""like blenderbot-model.bin""")
parser.add_argument("""--save_dir""", default="""hf_blenderbot""", type=str, help="""Where to save converted model.""")
parser.add_argument(
"""--hf_config_json""", default="""blenderbot-3b-config.json""", type=str, help="""Path to config to use"""
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 0
|
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        # Note: the upstream test hardcodes these values rather than using the arguments.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerModel(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        prediction_scores = model(inputs)['''logits''']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size])

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': TFRoFormerModel,
            '''fill-mask''': TFRoFormerForMaskedLM,
            '''question-answering''': TFRoFormerForQuestionAnswering,
            '''text-classification''': TFRoFormerForSequenceClassification,
            '''text-generation''': TFRoFormerForCausalLM,
            '''token-classification''': TFRoFormerForTokenClassification,
            '''zero-shot''': TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''')
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        # TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)
        print(output[:, :3, :3])
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)


@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = embed_positions(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        embed_positions([2, 16, 512])
        weights = embed_positions.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)


@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]
        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer)
        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ] )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
| 1
|
"""simple docstring"""
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class BenchmarkArguments:
    # Defaults below follow the upstream transformers benchmark utilities.
    models: List[str] = list_field(
        default=[],
        metadata={
            'help': (
                'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
                ' of all available models'
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'},
    )
    inference: bool = field(
        default=True,
        metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'},
    )
    cuda: bool = field(
        default=True,
        metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'},
    )
    tpu: bool = field(
        default=True, metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'}
    )
    fp16: bool = field(default=False, metadata={'help': 'Use FP16 to accelerate inference.'})
    training: bool = field(default=False, metadata={'help': 'Benchmark training of model'})
    verbose: bool = field(default=False, metadata={'help': 'Verbose memory tracing'})
    speed: bool = field(
        default=True,
        metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'},
    )
    memory: bool = field(
        default=True,
        metadata={
            'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={'help': 'Trace memory line by line'})
    save_to_csv: bool = field(default=False, metadata={'help': 'Save result to a CSV file'})
    log_print: bool = field(default=False, metadata={'help': 'Save all print statements in a log file'})
    env_print: bool = field(default=False, metadata={'help': 'Whether to print environment information'})
    multi_process: bool = field(
        default=True,
        metadata={
            'help': (
                'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
                ' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
                ' for debugging / testing and on TPU.'
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={'help': 'CSV filename used if saving time results to csv.'},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={'help': 'CSV filename used if saving memory results to csv.'},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={'help': 'CSV filename used if saving time results to csv for training.'},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={'help': 'CSV filename used if saving memory results to csv for training.'},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={'help': 'CSV filename used if saving environment information.'},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={'help': 'Log filename used if print statements are saved in log.'},
    )
    repeat: int = field(default=3, metadata={'help': 'Times an experiment will be run.'})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            'help': (
                'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
                ' model weights.'
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            """ are deprecated in general and it is advised to use external Benchmarking libraries """
            """ to benchmark Transformer models.""",
            FutureWarning,
        )

    def to_json_string(self):
        """Serialize this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                """Please make sure you provide at least one model name / model identifier, *e.g.* `--models"""
                """ bert-base-cased` or `args.models = ['bert-base-cased'].""")
        return self.models

    @property
    def do_multi_processing(self):
        # is_tpu is provided by the framework-specific subclasses.
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("""Multiprocessing is currently not possible on TPU.""")
            return False
        else:
            return True
| 359
|
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            """<unk>""",
            """[CLS]""",
            """[SEP]""",
            """want""",
            """unwanted""",
            """wa""",
            """un""",
            """running""",
            """,""",
            """low""",
            """l""",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["""vocab_file"""])
        with open(self.vocab_file, """w""", encoding="""utf-8""") as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["""lower_case"""] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = """<unk> UNwanted , running"""
        output_text = """<unk> unwanted, running"""
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize("""<unk> UNwanted , running""")
        self.assertListEqual(tokens, ["""<unk>""", """unwanted""", """,""", """running"""])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo ! how  \n Are yoU ?  """), ["""hello""", """!""", """how""", """are""", """you""", """?"""])

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(""" \tHeLLo ! how  \n Are yoU ?  """), ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""])

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = """Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"""
        tokens_out = [
            """Hello""",
            """(""",
            """bracket""",
            """)""",
            """and""",
            """side""",
            """@-@""",
            """scrolled""",
            """[""",
            """and""",
            """]""",
            """Henry""",
            """'s""",
            """$""",
            """5""",
            """@,@""",
            """000""",
            """with""",
            """3""",
            """@.@""",
            """34""",
            """m""",
            """.""",
            """What""",
            """'s""",
            """up""",
            """!""",
            """?""",
        ]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)
        tokenizer.add_tokens(["""new1""", """new2"""])
        tokenizer.move_added_token("""new1""", 1)
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("""new1"""), [1])
        self.assertEqual(tokenizer.decode([1]), """new1""")
| 359
| 1
|
def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string to its octal representation."""
    if not all(char in '''01''' for char in bin_string):
        raise ValueError('''Non-binary value was passed to the function''')
    if not bin_string:
        raise ValueError('''Empty string was passed to the function''')
    oct_string = ''''''
    while len(bin_string) % 3 != 0:
        bin_string = '''0''' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
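# Quick checks: 0b111 -> "7"; 0b1111 pads to 001|111 -> "17"; and
# 0b101010101 groups as 101|010|101 -> "525".
assert bin_to_octal("111") == "7"
assert bin_to_octal("1111") == "17"
assert bin_to_octal("101010101") == "525"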
| 14
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"])
    config = MaskFormerConfig(backbone_config=backbone_config)
    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    return config
def create_rename_keys(config):
    rename_keys = []
    # stem
    # fmt: off
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    '''Move the value stored under `old` to `new`, mutating the dict in place.'''
    val = dct.pop(old)
    dct[new] = val
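# A small sanity check, illustrative only: rename_key mutates the dict in place.
#
#   d = {"backbone.patch_embed.norm.weight": 1}
#   rename_key(d, "backbone.patch_embed.norm.weight", "embeddings.norm.weight")
#   assert d == {"embeddings.norm.weight": 1}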
def read_in_swin_q_k_v(state_dict, backbone_config):
    '''Split each fused Swin qkv projection into separate query/key/value entries.'''
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
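# Why the slicing above works, illustrative: the Swin checkpoint stores q, k and
# v stacked row-wise in a single (3*dim, dim) matrix, so the first, middle and
# last `dim` rows are the query, key and value projections respectively, e.g.
#
#   qkv = torch.arange(6).reshape(6, 1)  # toy fused matrix with dim = 2
#   q, k, v = qkv[:2, :], qkv[2:4, :], qkv[-2:, :]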
def read_in_decoder_q_k_v(state_dict, config):
    '''Split each fused DETR decoder in_proj matrix into q/k/v projections.'''
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    '''Download the standard COCO cats test image used to verify the conversion.'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    '''Copy/paste/tweak the original MaskFormer weights into the HF model structure.'''
    config = get_maskformer_config(model_name)
    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]
    # for name, param in state_dict.items():
    # print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)
    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()
    for name, param in model.named_parameters():
        print(name, param.shape)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"
    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)
    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)
    print("Logits:", outputs.class_queries_logits[0, :3, :3])
    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]])
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='maskformer-swin-tiny-ade',
        type=str,
        help='Name of the MaskFormer model you\'d like to convert.',
    )
    parser.add_argument(
        '--checkpoint_path',
        default='/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl',
        type=str,
        help='Path to the original state dict (.pth file).',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
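# Example invocation (script name and paths are placeholders):
#
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/model.pkl \
#       --pytorch_dump_folder_path ./converted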
'''simple docstring'''
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class IPNDMScheduler(SchedulerMixin, ConfigMixin):
    '''A fourth-order Improved Pseudo Linear Multistep (IPNDM) scheduler.'''

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=1000, trained_betas=None):
        # set `betas`, `alphas`, `timesteps`
        self.set_timesteps(num_train_timesteps)
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0
        # For now we only support F-PNDM, i.e. the runge-kutta method
        # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
        # mainly at formula (9), (12), (13) and the Algorithm 2.
        self.pndm_order = 4
        # running values
        self.ets = []

    def set_timesteps(self, num_inference_steps, device=None):
        '''Precompute the beta/alpha schedules and the discrete inference timesteps.'''
        self.num_inference_steps = num_inference_steps
        steps = torch.linspace(1, 0, num_inference_steps + 1)[:-1]
        steps = torch.cat([steps, torch.tensor([0.0])])
        if self.config.trained_betas is not None:
            self.betas = torch.tensor(self.config.trained_betas, dtype=torch.float32)
        else:
            self.betas = torch.sin(steps * math.pi / 2) ** 2
        self.alphas = (1.0 - self.betas**2) ** 0.5
        timesteps = (torch.atan2(self.betas, self.alphas) / math.pi * 2)[:-1]
        self.timesteps = timesteps.to(device)
        self.ets = []

    def step(self, model_output, timestep, sample, return_dict=True):
        '''One linear multistep update, using up to four cached model outputs.'''
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")
        timestep_index = (self.timesteps == timestep).nonzero().item()
        prev_timestep_index = timestep_index + 1
        ets = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
        self.ets.append(ets)
        # Adams-Bashforth coefficients for orders 1 to 4
        if len(self.ets) == 1:
            ets = self.ets[-1]
        elif len(self.ets) == 2:
            ets = (3 * self.ets[-1] - self.ets[-2]) / 2
        elif len(self.ets) == 3:
            ets = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
        else:
            ets = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
        prev_sample = self._get_prev_sample(sample, timestep_index, prev_timestep_index, ets)
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def scale_model_input(self, sample, *args, **kwargs):
        '''IPNDM does not rescale inputs; returned unchanged for API compatibility.'''
        return sample

    def _get_prev_sample(self, sample, timestep_index, prev_timestep_index, ets):
        alpha = self.alphas[timestep_index]
        sigma = self.betas[timestep_index]
        next_alpha = self.alphas[prev_timestep_index]
        next_sigma = self.betas[prev_timestep_index]
        pred = (sample - sigma * ets) / max(alpha, 1e-8)
        prev_sample = next_alpha * pred + ets * next_sigma
        return prev_sample

    def __len__(self):
        return self.config.num_train_timesteps
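# A minimal denoising-loop sketch, illustrative only (the zero "model output"
# stands in for a real diffusion network):
if __name__ == "__main__":
    scheduler = IPNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(50)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        model_output = torch.zeros_like(sample)  # placeholder for a real model
        sample = scheduler.step(model_output, t, sample).prev_sample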
'''simple docstring'''
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self):
        self._trie: dict = {}

    def insert_word(self, text):
        '''Insert `text` character by character, marking the end with END.'''
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix):
        '''Return every stored word that starts with `prefix`.'''
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d):
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string):
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main():
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
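# Expected behaviour, illustrative (tuple order follows dict insertion order,
# and each completion carries the trailing space produced by the END marker):
#
#   >>> autocomplete_using_trie("de")
#   ('depart ', 'detergent ', 'deer ', 'deal ')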
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at")


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)
    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None
    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))
    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)
    assert lbp_image.any()
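# These tests are meant to be collected by pytest from the repository root
# (path assumed):
#
#   python -m pytest digital_image_processing/test_digital_image_processing.py -q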
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(self, parent, batch_size=2, is_training=True, use_auxiliary_loss=False, num_queries=10, num_channels=3, min_size=32 * 4, max_size=32 * 6, num_labels=4, mask_feature_size=32):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device)
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1]), decoder_config=DetrConfig(
                decoder_ffn_dim=128, num_queries=self.num_queries, decoder_attention_heads=2, d_model=self.mask_feature_size), mask_feature_size=self.mask_feature_size, fpn_feature_size=self.mask_feature_size, num_channels=self.num_channels, num_labels=self.num_labels)

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)
    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.mask_feature_size))
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(self, config, pixel_values, pixel_mask, mask_labels, class_labels):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4))
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels)
        comm_check_on_output(result)
        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason='MaskFormer does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='MaskFormer does not have a get_input_embeddings method')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='MaskFormer is not a generative model')
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason='MaskFormer does not use token embeddings')
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            'pixel_values': torch.randn((2, 3, *size), device=torch_device),
            'mask_labels': torch.randn((2, 10, *size), device=torch_device),
            'class_labels': torch.zeros(2, 10, device=torch_device).long(),
        }
        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config)
        model.to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True)
        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco')
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors='pt').to(torch_device)
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))
        with torch.no_grad():
            outputs = model(**inputs)
        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))
        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE))

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors='pt').to(torch_device)
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))
        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff')
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors='pt').to(torch_device)
        inputs_shape = inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))
        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))], segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)], return_tensors='pt')
        inputs['pixel_values'] = inputs['pixel_values'].to(torch_device)
        inputs['mask_labels'] = [el.to(torch_device) for el in inputs['mask_labels']]
        inputs['class_labels'] = [el.to(torch_device) for el in inputs['class_labels']]
        with torch.no_grad():
            outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
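# A hedged end-to-end sketch outside the test harness; the checkpoint name
# matches the one exercised above, and `post_process_semantic_segmentation`
# is part of MaskFormerImageProcessor's public API.
#
#   processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
#   model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)
#   semantic_map = processor.post_process_semantic_segmentation(outputs)[0]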
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
    verbose_logging: Optional[bool] = field(
        default=False, metadata={'help': 'Whether to log verbose messages or not.'})
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={'help': 'Maximum temperature for gumbel softmax.'})
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={'help': 'Minimum temperature for gumbel softmax.'})
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={'help': 'Decay of gumbel temperature during training.'})
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_split_name: Optional[str] = field(
        default='train', metadata={
            'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
        })
    validation_split_name: Optional[str] = field(
        default='validation', metadata={
            'help': (
                'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
            )
        })
    speech_file_column: Optional[str] = field(
        default='file', metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''})
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'})
    validation_split_percentage: Optional[int] = field(
        default=1, metadata={
            'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
        })
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'})
@dataclass
class DataCollatorForWav2Vec2Pretraining:
    model: Wav2Vec2ForPreTraining
    feature_extractor: Wav2Vec2FeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]):
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt')
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch['input_values'].shape[-1])
        batch_size = batch['input_values'].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch['attention_mask'].sum(-1)).to(
                torch.long)
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch['input_values'].device)
            # these two operations makes sure that all values
            # before the output lengths indices are attended to
            attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2)
        return batch
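# Shape sketch for the collator above, illustrative only: `_compute_mask_indices`
# returns a (batch_size, sequence_length) boolean array marking the frames whose
# quantized targets the model must predict, e.g.
#
#   mask = _compute_mask_indices((2, 100), mask_prob=0.65, mask_length=10, min_masks=2)
#   assert mask.shape == (2, 100)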
class Wav2Vec2PreTrainer(Trainer):
    '''Subclassed Trainer that decays the gumbel softmax temperature after every update step.'''

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]):
        '''Perform a training step on a batch of inputs.'''
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs['mask_time_indices']).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        return loss.detach()
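# The decay schedule above in numbers, using the defaults declared in
# ModelArguments (max 2.0, min 0.5, decay 0.999995): the temperature hits its
# floor after about log(0.5 / 2.0) / log(0.999995) ≈ 277,000 update steps.
#
#   temp_at = lambda step: max(2.0 * 0.999995**step, 0.5)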
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)
    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir)
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir)
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split='validation', cache_dir=model_args.cache_dir)
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}", cache_dir=model_args.cache_dir)
    # only normalized-inputs-training is supported
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True)

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets['train'].column_names)
    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data['speech']) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate))

    def normalize(batch):
        return feature_extractor(batch['speech'], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets['train'].column_names)
    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = Wav2Vec2Config.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing)
    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            'PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'
            ' ``config.feat_extract_norm=\'layer\'``')
    model = Wav2Vec2ForPreTraining(config)
    data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor)
    trainer = Wav2Vec2PreTrainer(
        model=model, data_collator=data_collator, args=training_args, train_dataset=vectorized_datasets['train'], eval_dataset=vectorized_datasets['validation'], tokenizer=feature_extractor, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay)
    trainer.train()
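# Example invocation (script name, checkpoint, dataset and paths are placeholders):
#
#   python run_wav2vec2_pretraining.py \
#       --model_name_or_path <pretrained-wav2vec2-checkpoint> \
#       --dataset_name librispeech_asr \
#       --dataset_config_name clean \
#       --output_dir ./wav2vec2-pretrained \
#       --do_train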
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline