| code (string, lengths 86 – 54.5k) | code_codestyle (int64, 0 – 371) | style_context (string, lengths 87 – 49.2k) | style_context_codestyle (int64, 0 – 349) | label (int64, 0 – 1) |
|---|---|---|---|---|
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
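# ---------------------------------------------------------------------------
# The _LazyModule pattern above defers heavy imports (torch, tensorflow) until
# an attribute is first accessed. A minimal sketch of the same idea using
# PEP 562 module __getattr__ -- an illustration, not the actual _LazyModule
# implementation:
# ---------------------------------------------------------------------------
import importlib

_demo_import_structure = {"json": ["dumps", "loads"]}
_attr_to_module = {a: m for m, attrs in _demo_import_structure.items() for a in attrs}


def __getattr__(name):
    # Import the owning module only when one of its attributes is first accessed.
    if name in _attr_to_module:
        return getattr(importlib.import_module(_attr_to_module[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")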
| 349
|
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SafeDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=lowercase , set_alpha_to_one=lowercase , )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
__UpperCamelCase = StableDiffusionPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """A painting of a squirrel eating a burger"""
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
__UpperCamelCase = output.images
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowercase , )[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__UpperCamelCase = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
__UpperCamelCase = StableDiffusionPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """A painting of a squirrel eating a burger"""
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe([prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
__UpperCamelCase = output.images
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(0 )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=lowercase , )[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__UpperCamelCase = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Union[str, Any]:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=lowercase )
assert isinstance(lowercase , lowercase )
assert isinstance(pipe.scheduler , lowercase )
assert pipe.safety_checker is None
__UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowercase )
__UpperCamelCase = StableDiffusionPipeline.from_pretrained(lowercase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__UpperCamelCase = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __lowerCamelCase ( self ) -> Optional[int]:
__UpperCamelCase = self.dummy_cond_unet
__UpperCamelCase = PNDMScheduler(skip_prk_steps=lowercase )
__UpperCamelCase = self.dummy_vae
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
__UpperCamelCase = unet.half()
__UpperCamelCase = vae.half()
__UpperCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
__UpperCamelCase = StableDiffusionPipeline(
unet=lowercase , scheduler=lowercase , vae=lowercase , text_encoder=lowercase , tokenizer=lowercase , safety_checker=lowercase , feature_extractor=self.dummy_extractor , )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """A painting of a squirrel eating a burger"""
__UpperCamelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 6_4, 6_4, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def __lowerCamelCase ( self ) -> Dict:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase )
__UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
__UpperCamelCase = 4_0_0_3_6_6_0_3_4_6
__UpperCamelCase = 7
# without safety guidance (sld_guidance_scale = 0)
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# without safety guidance (strong configuration)
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=lowercase )
__UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = """padme amidala taking a bath artwork, safe for work, no nudity"""
__UpperCamelCase = 2_7_3_4_9_7_1_7_5_5
__UpperCamelCase = 7
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
__UpperCamelCase = sd_pipe.to(lowercase )
sd_pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
__UpperCamelCase = 1_0_4_4_3_5_5_2_3_4
__UpperCamelCase = 1_2
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = sd_pipe(
[prompt] , generator=lowercase , guidance_scale=lowercase , num_inference_steps=5_0 , output_type="""np""" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
__UpperCamelCase = output.images
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
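# ---------------------------------------------------------------------------
# The determinism pattern used throughout these tests generalises: a fresh,
# seeded torch.Generator per call makes sampling reproducible. A standalone
# sketch with plain tensors instead of a pipeline, for brevity:
# ---------------------------------------------------------------------------
import torch


def seeded_generator(seed: int, device: str = "cpu") -> torch.Generator:
    # A fresh generator per call keeps runs independent; reusing one generator
    # advances its state, so repeated calls would produce different outputs.
    return torch.Generator(device=device).manual_seed(seed)


assert torch.equal(
    torch.randn(4, generator=seeded_generator(0)),
    torch.randn(4, generator=seeded_generator(0)),
)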
| 349
| 1
|
"""simple docstring"""
from copy import deepcopy
class __lowerCamelCase :
'''simple docstring'''
def __init__( self : List[str] , a_ : list[int] | None = None , a_ : int | None = None ):
if arr is None and size is not None:
lowerCAmelCase_ : str = size
lowerCAmelCase_ : int = [0] * size
elif arr is not None:
self.init(a_ )
else:
raise ValueError("Either arr or size must be specified" )
def lowerCamelCase ( self : Tuple , a_ : list[int] ):
lowerCAmelCase_ : Union[str, Any] = len(a_ )
lowerCAmelCase_ : str = deepcopy(a_ )
for i in range(1 , self.size ):
lowerCAmelCase_ : Any = self.next_(a_ )
if j < self.size:
self.tree[j] += self.tree[i]
def lowerCamelCase ( self : Dict ):
lowerCAmelCase_ : str = self.tree[:]
for i in range(self.size - 1 , 0 , -1 ):
lowerCAmelCase_ : List[str] = self.next_(a_ )
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def lowerCamelCase ( a_ : int ):
return index + (index & (-index))
@staticmethod
def lowerCamelCase ( a_ : int ):
return index - (index & (-index))
def lowerCamelCase ( self : Tuple , a_ : int , a_ : int ):
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
lowerCAmelCase_ : Any = self.next_(a_ )
def lowerCamelCase ( self : str , a_ : int , a_ : int ):
self.add(a_ , value - self.get(a_ ) )
def lowerCamelCase ( self : Tuple , a_ : int ):
if right == 0:
return 0
lowerCAmelCase_ : Union[str, Any] = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
lowerCAmelCase_ : Tuple = self.prev(a_ )
return result
def lowerCamelCase ( self : List[str] , a_ : int , a_ : int ):
return self.prefix(a_ ) - self.prefix(a_ )
def lowerCamelCase ( self : str , a_ : int ):
return self.query(a_ , index + 1 )
def lowerCamelCase ( self : Tuple , a_ : int ):
value -= self.tree[0]
if value < 0:
return -1
lowerCAmelCase_ : Union[str, Any] = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
lowerCAmelCase_ : Optional[Any] = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
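# ---------------------------------------------------------------------------
# Usage sketch for the FenwickTree above (names as reconstructed here):
# ---------------------------------------------------------------------------
ft = FenwickTree(arr=[1, 2, 3, 4, 5])
assert ft.prefix(3) == 1 + 2 + 3      # sum of arr[0:3]
assert ft.query(1, 4) == 2 + 3 + 4    # sum of arr[1:4]
ft.add(2, 10)                         # arr becomes [1, 2, 13, 4, 5]
assert ft.get(2) == 13
assert ft.get_array() == [1, 2, 13, 4, 5]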
| 353
|
"""simple docstring"""
def __lowerCamelCase ( __UpperCamelCase = 50 ) -> int:
"""simple docstring"""
lowerCAmelCase_ : int = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
| 161
| 0
|
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging


logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.Tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. "
                f"Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
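# ---------------------------------------------------------------------------
# Usage sketch for MultiControlNetModel above. The checkpoint paths are
# placeholders shown only to illustrate the save/load naming convention, so
# the calls are left commented out:
# ---------------------------------------------------------------------------
# controlnet_a = ControlNetModel.from_pretrained("path/to/controlnet-canny")
# controlnet_b = ControlNetModel.from_pretrained("path/to/controlnet-pose")
# multi = MultiControlNetModel([controlnet_a, controlnet_b])
#
# multi.save_pretrained("./mydirectory/controlnet")   # writes controlnet, controlnet_1
# restored = MultiControlNetModel.from_pretrained("./mydirectory/controlnet")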
| 313
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="replace" , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase=False , _lowerCamelCase=True , **_lowerCamelCase , ) ->Union[str, Any]:
super().__init__(
_lowerCamelCase , _lowerCamelCase , tokenizer_file=_lowerCamelCase , errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , unk_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , trim_offsets=_lowerCamelCase , **_lowerCamelCase , )
SCREAMING_SNAKE_CASE : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE : str = getattr(_lowerCamelCase , pre_tok_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : Optional[int] = add_prefix_space
SCREAMING_SNAKE_CASE : str = pre_tok_class(**_lowerCamelCase )
SCREAMING_SNAKE_CASE : Any = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
SCREAMING_SNAKE_CASE : List[Any] = '''post_processor'''
SCREAMING_SNAKE_CASE : int = getattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
if tokenizer_component_instance:
SCREAMING_SNAKE_CASE : Any = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
SCREAMING_SNAKE_CASE : Optional[int] = tuple(state['''sep'''] )
if "cls" in state:
SCREAMING_SNAKE_CASE : Optional[Any] = tuple(state['''cls'''] )
SCREAMING_SNAKE_CASE : Any = False
if state.get('''add_prefix_space''' , _lowerCamelCase ) != add_prefix_space:
SCREAMING_SNAKE_CASE : Union[str, Any] = add_prefix_space
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if state.get('''trim_offsets''' , _lowerCamelCase ) != trim_offsets:
SCREAMING_SNAKE_CASE : List[Any] = trim_offsets
SCREAMING_SNAKE_CASE : Union[str, Any] = True
if changes_to_apply:
SCREAMING_SNAKE_CASE : List[str] = getattr(_lowerCamelCase , state.pop('''type''' ) )
SCREAMING_SNAKE_CASE : List[Any] = component_class(**_lowerCamelCase )
setattr(self.backend_tokenizer , _lowerCamelCase , _lowerCamelCase )
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding:
SCREAMING_SNAKE_CASE : Tuple = kwargs.get('''is_split_into_words''' , _lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , *_lowerCamelCase , **_lowerCamelCase ) ->BatchEncoding:
SCREAMING_SNAKE_CASE : List[Any] = kwargs.get('''is_split_into_words''' , _lowerCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*_lowerCamelCase , **_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->Tuple[str]:
SCREAMING_SNAKE_CASE : Any = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase )
return tuple(_lowerCamelCase )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase=None ) ->Any:
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None ) ->List[int]:
SCREAMING_SNAKE_CASE : Any = [self.sep_token_id]
SCREAMING_SNAKE_CASE : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = PaddingStrategy.DO_NOT_PAD , _lowerCamelCase = None , _lowerCamelCase = None , ) ->dict:
SCREAMING_SNAKE_CASE : Tuple = super()._pad(
encoded_inputs=_lowerCamelCase , max_length=_lowerCamelCase , padding_strategy=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
# Load from model defaults
if return_attention_mask is None:
SCREAMING_SNAKE_CASE : Optional[Any] = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
SCREAMING_SNAKE_CASE : int = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
SCREAMING_SNAKE_CASE : Tuple = len(encoded_inputs['''global_attention_mask'''] ) != len(_lowerCamelCase )
if needs_to_be_padded:
SCREAMING_SNAKE_CASE : int = len(_lowerCamelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
SCREAMING_SNAKE_CASE : str = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
SCREAMING_SNAKE_CASE : Optional[Any] = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
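# ---------------------------------------------------------------------------
# The padding rule implemented in _pad above, standalone: global_attention_mask
# is padded with -1 because 0 already means "local attention" and cannot double
# as the pad value. A minimal sketch:
# ---------------------------------------------------------------------------
def pad_global_attention_mask(mask: list, target_len: int, padding_side: str = "right") -> list:
    difference = target_len - len(mask)
    return mask + [-1] * difference if padding_side == "right" else [-1] * difference + mask


assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]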
| 313
| 1
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class MgpstrProcessorTest(unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a random PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input
def snake_case ( self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = MgpstrProcessor(tokenizer=__a , image_processor=__a )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=__a )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def snake_case ( self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = MgpstrProcessor(tokenizer=__a , image_processor=__a )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__lowerCAmelCase = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
__lowerCAmelCase = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , __a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = MgpstrProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = image_processor(__a , return_tensors="np" )
__lowerCAmelCase = processor(images=__a , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = MgpstrProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = "test"
__lowerCAmelCase = processor(text=__a )
__lowerCAmelCase = tokenizer(__a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = MgpstrProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = "test"
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(__a ):
processor()
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = MgpstrProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
__lowerCAmelCase = processor.char_decode(__a )
__lowerCAmelCase = tokenizer.batch_decode(__a )
__lowerCAmelCase = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(__a , __a )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = MgpstrProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = None
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=__a , images=__a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = MgpstrProcessor(tokenizer=__a , image_processor=__a )
__lowerCAmelCase = torch.randn(1 , 27 , 38 )
__lowerCAmelCase = torch.randn(1 , 27 , 5_02_57 )
__lowerCAmelCase = torch.randn(1 , 27 , 3_05_22 )
__lowerCAmelCase = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 259
|
"""simple docstring"""
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = [[] for _ in range(_UpperCamelCase )]
__lowerCAmelCase = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1 or len(_UpperCamelCase ) <= key:
return input_string
for position, character in enumerate(_UpperCamelCase ):
__lowerCAmelCase = position % (lowest * 2) # puts it in bounds
__lowerCAmelCase = min(_UpperCamelCase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(_UpperCamelCase )
__lowerCAmelCase = ["".join(_UpperCamelCase ) for row in temp_grid]
__lowerCAmelCase = "".join(_UpperCamelCase )
return output_string
def _lowerCamelCase ( _UpperCamelCase , _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1:
return input_string
__lowerCAmelCase = [[] for _ in range(_UpperCamelCase )] # generates template
for position in range(len(_UpperCamelCase ) ):
__lowerCAmelCase = position % (lowest * 2) # puts it in bounds
__lowerCAmelCase = min(_UpperCamelCase , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("*" )
__lowerCAmelCase = 0
for row in temp_grid: # fills in the characters
__lowerCAmelCase = input_string[counter : counter + len(_UpperCamelCase )]
grid.append(list(_UpperCamelCase ) )
counter += len(_UpperCamelCase )
__lowerCAmelCase = "" # reads as zigzag
for position in range(len(_UpperCamelCase ) ):
__lowerCAmelCase = position % (lowest * 2) # puts it in bounds
__lowerCAmelCase = min(_UpperCamelCase , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = {}
for key_guess in range(1 , len(_UpperCamelCase ) ): # tries every key
__lowerCAmelCase = decrypt(_UpperCamelCase , _UpperCamelCase )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 259
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/pegasus-xsum": 512,
}

logger = logging.get_logger(__name__)


class PegasusTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , _snake_case , _snake_case="<pad>" , _snake_case="</s>" , _snake_case="<unk>" , _snake_case="<mask_2>" , _snake_case="<mask_1>" , _snake_case=None , _snake_case=103 , _snake_case = None , **_snake_case , ) -> None:
'''simple docstring'''
__a = offset
if additional_special_tokens is not None:
if not isinstance(_snake_case , _snake_case ):
raise TypeError(
F"""additional_special_tokens should be of type {type(_snake_case )}, but is"""
F""" {type(_snake_case )}""" )
__a = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F"""<unk_{i}>""" for i in range(len(_snake_case ) , self.offset - 1 )
]
if len(set(_snake_case ) ) != len(_snake_case ):
raise ValueError(
'''Please make sure that the provided additional_special_tokens do not contain an incorrectly'''
F""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
__a = additional_special_tokens_extended
else:
__a = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F"""<unk_{i}>""" for i in range(2 , self.offset )]
__a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_snake_case , unk_token=_snake_case , mask_token=_snake_case , pad_token=_snake_case , mask_token_sent=_snake_case , offset=_snake_case , additional_special_tokens=_snake_case , sp_model_kwargs=self.sp_model_kwargs , **_snake_case , )
__a = mask_token_sent
__a = vocab_file
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_snake_case )
# add special tokens to encoder dict
__a = {
0: self.pad_token,
1: self.eos_token,
}
if self.mask_token_sent is not None:
self.encoder.update(
{
2: self.mask_token_sent,
3: self.mask_token,
} )
if self.offset > 0:
# entries 2-104 are only used for pretraining and called <mask_1>, <mask_2>, unk_2, ...unk_102
# mask_token_sent is already added to list -> so start at 1
self.encoder.update({i + 3: additional_special_tokens[i] for i in range(1 , self.offset - 1 )} )
__a = {v: k for k, v in self.encoder.items()}
@property
def SCREAMING_SNAKE_CASE_ ( self ) -> int:
'''simple docstring'''
return len(self.sp_model ) + self.offset
def SCREAMING_SNAKE_CASE_ ( self ) -> Dict[str, int]:
'''simple docstring'''
__a = {self.convert_ids_to_tokens(_snake_case ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
'''simple docstring'''
__a = self.__dict__.copy()
__a = None
return state
def __setstate__( self , _snake_case ) -> Optional[Any]:
'''simple docstring'''
__a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__a = {}
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(_snake_case , out_type=_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> int:
'''simple docstring'''
if token in self.decoder:
return self.decoder[token]
elif token in self.added_tokens_decoder:
return self.added_tokens_decoder[token]
__a = self.sp_model.piece_to_id(_snake_case )
return sp_id + self.offset
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> str:
'''simple docstring'''
if index in self.encoder:
return self.encoder[index]
elif index in self.added_tokens_encoder:
return self.added_tokens_encoder[index]
else:
__a = self.sp_model.IdToPiece(index - self.offset )
return token
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Any:
'''simple docstring'''
__a = []
__a = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_snake_case ) + token
__a = []
else:
current_sub_tokens.append(_snake_case )
out_string += self.sp_model.decode(_snake_case )
return out_string.strip()
def SCREAMING_SNAKE_CASE_ ( self , _snake_case=False ) -> Any:
'''simple docstring'''
return 1
def SCREAMING_SNAKE_CASE_ ( self , _snake_case ) -> Optional[int]:
'''simple docstring'''
__a = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
return [1 if x in all_special_ids else 0 for x in seq]
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None , _snake_case = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(_snake_case )
elif token_ids_a is None:
return self._special_token_mask(_snake_case ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case=None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_snake_case ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__a = os.path.join(
_snake_case , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _snake_case )
elif not os.path.isfile(self.vocab_file ):
with open(_snake_case , '''wb''' ) as fi:
__a = self.sp_model.serialized_model_proto()
fi.write(_snake_case )
return (out_vocab_file,)
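# ---------------------------------------------------------------------------
# The id scheme above reserves ids 0..offset-1 for special tokens and shifts
# every SentencePiece id up by `offset`. A standalone sketch of that mapping:
# ---------------------------------------------------------------------------
OFFSET = 103  # ids 0..102 cover pad, eos, the mask tokens and <unk_2>..<unk_102>


def sp_to_token_id(sp_id: int) -> int:
    return sp_id + OFFSET


def token_to_sp_id(token_id: int) -> int:
    if token_id < OFFSET:
        raise ValueError(f"id {token_id} is reserved for a special token")
    return token_id - OFFSET


assert token_to_sp_id(sp_to_token_id(7)) == 7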
| 6
|
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))
_A : List[Any] = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=_a )
self.assertEqual(
_a , [
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] , )
_A : Optional[int] = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=_a )
self.assertEqual(
_a , [
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
[{"""generated_text""": ANY(_a )}, {"""generated_text""": ANY(_a )}],
] , )
with self.assertRaises(_a ):
generator(4 )
@require_torch
def a__ ( self ) -> List[str]:
_A : Any = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
_A : Dict = generator("""Something there""" , do_sample=_a )
self.assertEqual(_a , [{"""generated_text""": """"""}] )
_A : Any = 3
_A : Any = generator(
"""Something there""" , num_return_sequences=_a , num_beams=_a , )
_A : Optional[int] = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(_a , _a )
_A : Dict = generator("""This is a test""" , do_sample=_a , num_return_sequences=2 , return_tensors=_a )
self.assertEqual(
_a , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
_A : Dict = generator.model.config.eos_token_id
_A : List[str] = """<pad>"""
_A : Dict = generator(
["""This is a test""", """This is a second test"""] , do_sample=_a , num_return_sequences=2 , batch_size=2 , return_tensors=_a , )
self.assertEqual(
_a , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
def a__ ( self ) -> int:
_A : Optional[Any] = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""tf""" )
# do_sample=False necessary for reproducibility
_A : str = generator("""Something there""" , do_sample=_a )
self.assertEqual(_a , [{"""generated_text""": """"""}] )
| 26
| 0
|
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the corresponding torch.nn module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
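# ---------------------------------------------------------------------------
# Quick usage check for the helper above:
# ---------------------------------------------------------------------------
import torch

act = get_activation("silu")
x = torch.tensor([-1.0, 0.0, 1.0])
assert torch.allclose(act(x), x * torch.sigmoid(x))  # SiLU(x) = x * sigmoid(x)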
| 71
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
_DESCRIPTION = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
_KWARGS_DESCRIPTION = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidate should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric(\"code_eval\")\n >>> test_cases = [\"assert add(2,3)==5\"]\n >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {'pass@1': 0.5, 'pass@2': 1.0}\n"
_WARNING = "\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
_LICENSE = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CodeEval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores."""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results


def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
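# ---------------------------------------------------------------------------
# Numeric checks for the unbiased pass@k estimator above:
# ---------------------------------------------------------------------------
# pass@1 reduces to the plain success rate c / n
assert np.isclose(estimate_pass_at_k(np.array([5]), np.array([2]), 1)[0], 2 / 5)
# no correct samples -> pass@k is 0; every k-subset must contain a pass -> 1
assert estimate_pass_at_k(np.array([3]), np.array([0]), 2)[0] == 0.0
assert estimate_pass_at_k(np.array([3]), np.array([2]), 2)[0] == 1.0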
| 71
| 1
|
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items())
        )
    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors="pt")
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"], boxes=inputs["boxes"], return_tensors="pt")
        inputs["target_size"] = target_size
        return inputs
    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({"target_size": target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs
    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]
                    )
                )

            scores, classes = model_outputs["logits"].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs["bbox"].squeeze(0)]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            raw_annotation["scores"] = raw_annotation["scores"].tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in raw_annotation["labels"]]
            raw_annotation["boxes"] = [self._get_bounding_box(box) for box in raw_annotation["boxes"]]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation["scores"], raw_annotation["labels"], raw_annotation["boxes"])
            ]

        return annotation
    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        """Turns a tensor [xmin, ymin, xmax, ymax] into a dict {"xmin": xmin, ...}."""
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
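
# Hedged usage sketch (illustrative, not part of the original file; assumes
# network access and the public "facebook/detr-resnet-50" checkpoint):
#
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg")
#   # -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., ...}}, ...]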
| 67
|
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        # Mean-pool the token embeddings over the non-padded positions, then project.
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs), embs
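
# Hedged usage sketch (illustrative, not part of the original file): the
# transformer hidden size must match transformerDimSize for the projection to
# line up, so we pin both to 768 here.
#
#   config = MCLIPConfig(transformerDimSize=768, imageDimSize=640)
#   model = MultilingualCLIP(config)
#   input_ids = torch.ones(2, 7, dtype=torch.long)
#   attention_mask = torch.ones(2, 7, dtype=torch.long)
#   projected, pooled = model(input_ids, attention_mask)
#   # projected.shape == (2, 640), pooled.shape == (2, 768)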
| 67
| 1
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
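
# Hedged note (illustrative, not part of the original file): registering the
# _LazyModule in sys.modules means the torch-dependent TimmBackbone class is
# only imported when an attribute is actually accessed, e.g.
#   from transformers.models.timm_backbone import TimmBackbone
# keeps a bare `import transformers` cheap.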
| 300
|
class Graph:
    def __init__(self):
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)

        if head == tail:
            return

        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Makes all edge weights distinct (required by Boruvka's algorithm)."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))

        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1

        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self):
        """Returns a string representation of the graph."""
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges in the graph as (tail, head, weight) triples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from the given lists of vertices and edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set forest with union by rank and path compression."""

        def __init__(self):
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)

            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Implementation of Boruvka's algorithm; returns a minimum spanning tree."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
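
# Hedged usage sketch (illustrative, not part of the original file):
if __name__ == "__main__":
    g = Graph.build(edges=[["a", "b", 1], ["b", "c", 2], ["a", "c", 3]])
    g.distinct_weight()
    mst = Graph.boruvka_mst(g)
    print(mst)  # keeps the two lightest edges: a-b and b-c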
| 300
| 1
|
"""simple docstring"""
def factorial(num: int) -> int:
    """Computes num! by repeated multiplication."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Splits a number into its digits and adds them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Returns the sum of the digits in num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
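
# Worked example (illustrative, not part of the original file): 10! = 3628800,
# whose digits sum to 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) == 27.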
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 269
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
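
# Hedged usage sketch (illustrative, not part of the original file): compose
# an InstructBLIP config from default sub-configs.
#
#   config = InstructBlipConfig()
#   assert config.vision_config.hidden_size == 1408
#   assert config.qformer_config.encoder_hidden_size == 1408
#   assert config.text_config.model_type == "opt"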
| 161
| 0
|
def count_inversions_bf(arr):
    """Counts inversions by brute force: O(n^2)."""
    num_inversions = 0
    n = len(arr)

    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1

    return num_inversions


def count_inversions_recursive(arr):
    """Counts inversions by divide and conquer: O(n log n)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Counts inversions between two sorted lists while merging them."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]

    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 8

    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
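
# Note (illustrative, not part of the original file): the brute-force counter
# is O(n^2), while the recursive version reaches O(n log n) by counting cross
# inversions during the merge step of merge sort.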
| 350
|
def count_inversions_bf(arr):
    """Counts inversions by brute force: O(n^2)."""
    num_inversions = 0
    n = len(arr)

    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1

    return num_inversions


def count_inversions_recursive(arr):
    """Counts inversions by divide and conquer: O(n log n)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Counts inversions between two sorted lists while merging them."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]

    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 8

    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)

    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
| 162
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-classification/requirements.txt""")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        """Computes accuracy on a batch of predictions"""
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch

    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
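
# Hedged usage sketch (illustrative, not part of the original file): a minimal
# invocation on the public "beans" dataset, assuming network access:
#
#   python run_image_classification.py \
#       --dataset_name beans \
#       --output_dir ./beans_outputs \
#       --remove_unused_columns False \
#       --do_train \
#       --do_eval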
| 259
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generates a tuple of dummy DataLoaders to test with."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Trains for `num_epochs`, returning the random values drawn each step."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    """Simple model to do y = a * x + b."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())
    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))
    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
__snake_case = """/tmp/accelerate/state_checkpointing"""
__snake_case = DummyModel()
__snake_case = torch.optim.Adam(params=model.parameters(), lr=1E-3)
__snake_case = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
__snake_case , __snake_case = dummy_dataloaders()
__snake_case = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__snake_case = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__snake_case , __snake_case = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__snake_case = group["""params"""][0].device
break
assert param_device.type == accelerator.device.type
__snake_case = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
__snake_case = group["""params"""][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
__snake_case = group["""params"""][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
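
# Hedged note (illustrative, not part of the original file): the unittest
# methods above run single-process under pytest, while the __main__ block is
# meant to be launched via `accelerate launch` or torchrun so that the
# map_location checks exercise multi-process checkpoint loading.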
| 259
| 1
|
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
    GPT2Config,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
    T5Config,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
    TFGPT2LMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
    TFT5ForConditionalGeneration,
TFTransfoXLLMHeadModel,
    TFWav2Vec2Model,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
    Wav2Vec2Config,
    Wav2Vec2Model,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
    load_pytorch_checkpoint_in_tf2_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
        GPT2LMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
        T5ForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
        GPT2Config,
        TFGPT2LMHeadModel,
        GPT2LMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
        T5Config,
        TFT5ForConditionalGeneration,
        T5ForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
        Wav2Vec2Config,
        TFWav2Vec2Model,
        Wav2Vec2Model,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def __lowerCAmelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , SCREAMING_SNAKE_CASE=False , )-> str:
"""simple docstring"""
if args_model_type is None:
snake_case_ = list(MODEL_CLASSES.keys() )
else:
snake_case_ = [args_model_type]
for j, model_type in enumerate(SCREAMING_SNAKE_CASE , start=1 ):
print('''=''' * 100 )
print(f''' Converting model type {j}/{len(SCREAMING_SNAKE_CASE )}: {model_type}''' )
print('''=''' * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(f'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
    snake_case_ , snake_case_ , snake_case_ , snake_case_ = MODEL_CLASSES[model_type]  # MODEL_CLASSES entries are 4-tuples
if model_shortcut_names_or_path is None:
snake_case_ = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
snake_case_ = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) , start=1 ):
print('''-''' * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
snake_case_ = model_shortcut_name
elif only_convert_finetuned_models:
print(f''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
f''' Converting checkpoint {i}/{len(SCREAMING_SNAKE_CASE )}: {model_shortcut_name} - model_type {model_type}''' )
print('''-''' * 100 )
if config_shortcut_name in aws_config_map:
snake_case_ = cached_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
else:
snake_case_ = config_shortcut_name
if model_shortcut_name in aws_model_maps:
snake_case_ = cached_file(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , force_download=not use_cached_models )
else:
snake_case_ = model_shortcut_name
if os.path.isfile(SCREAMING_SNAKE_CASE ):
snake_case_ = '''converted_model'''
convert_pt_checkpoint_to_tf(
model_type=SCREAMING_SNAKE_CASE , pytorch_checkpoint_path=SCREAMING_SNAKE_CASE , config_file=SCREAMING_SNAKE_CASE , tf_dump_path=os.path.join(SCREAMING_SNAKE_CASE , model_shortcut_name + '''-tf_model.h5''' ) , compare_with_pt_model=SCREAMING_SNAKE_CASE , )
if remove_cached_files:
os.remove(SCREAMING_SNAKE_CASE )
os.remove(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
f'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
UpperCAmelCase = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
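# Hedged CLI sketch for the script above (the file name is an assumption; the
# flags are the ones defined by the parser, and "flaubert" is one of the
# MODEL_CLASSES keys):
#   python convert_pytorch_checkpoint_to_tf2.py \
#       --tf_dump_path ./tf_dump --model_type flaubert --compare_with_pt_model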
| 267
|
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
snake_case_ = ['''a''', '''b''', '''c''']
# Defaults to last layer if both are None
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , ['''c'''] )
self.assertEqual(_UpperCAmelCase , [2] )
# Out indices set to match out features
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(['''a''', '''c'''] , _UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , ['''a''', '''c'''] )
self.assertEqual(_UpperCAmelCase , [0, 2] )
# Out features set to match out indices
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(_UpperCAmelCase , [0, 2] , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , ['''a''', '''c'''] )
self.assertEqual(_UpperCAmelCase , [0, 2] )
# Out features selected from negative indices
snake_case_ , snake_case_ = get_aligned_output_features_output_indices(_UpperCAmelCase , [-3, -1] , _UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , ['''a''', '''c'''] )
self.assertEqual(_UpperCAmelCase , [-3, -1] )
def UpperCamelCase__ ( self ):
# Stage names must be set
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , _UpperCAmelCase )
# Out features must be a list
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(('''a''', '''b''') , (0, 1) , ['''a''', '''b'''] )
# Out features must be a subset of stage names
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 1) , ['''a'''] )
# Out indices must be a list or tuple
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(_UpperCAmelCase , 0 , ['''a''', '''b'''] )
# Out indices must be a subset of stage names
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(_UpperCAmelCase , (0, 1) , ['''a'''] )
# Out features and out indices must be the same length
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(['''a''', '''b'''] , (0,) , ['''a''', '''b''', '''c'''] )
# Out features should match out indices
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(['''a''', '''b'''] , (0, 2) , ['''a''', '''b''', '''c'''] )
# Out features and out indices should be in order
with self.assertRaises(_UpperCAmelCase ):
verify_out_features_out_indices(['''b''', '''a'''] , (0, 1) , ['''a''', '''b'''] )
# Check passes with valid inputs
verify_out_features_out_indices(['''a''', '''b''', '''d'''] , (0, 1, -1) , ['''a''', '''b''', '''c''', '''d'''] )
def UpperCamelCase__ ( self ):
snake_case_ = BackboneMixin()
snake_case_ = ['''a''', '''b''', '''c''']
snake_case_ = ['''a''', '''c''']
snake_case_ = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [0, 2] )
# Check out features and indices are updated correctly
snake_case_ = ['''a''', '''b''']
self.assertEqual(backbone.out_features , ['''a''', '''b'''] )
self.assertEqual(backbone.out_indices , [0, 1] )
snake_case_ = [-3, -1]
self.assertEqual(backbone.out_features , ['''a''', '''c'''] )
self.assertEqual(backbone.out_indices , [-3, -1] )
| 267
| 1
|
from __future__ import annotations
from collections.abc import Iterator
class __A :
"""simple docstring"""
def __init__( self , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : Union[str, Any] =value
__UpperCamelCase : Node | None =None
__UpperCamelCase : Node | None =None
class __A :
"""simple docstring"""
def __init__( self , lowerCamelCase__ ):
"""simple docstring"""
__UpperCamelCase : List[str] =tree
def __lowercase ( self , lowerCamelCase__ ):
"""simple docstring"""
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self ):
"""simple docstring"""
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
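# A minimal usage sketch (hedged: both classes above were obfuscated to `__A`;
# assume they were originally named `Node` and `BinaryTreeNodeSum`):
#   tree = Node(10); tree.left = Node(5); tree.right = Node(-3)
#   assert next(iter(BinaryTreeNodeSum(tree))) == 12  # 10 + 5 + (-3)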
| 71
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
A_ :List[str] = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
A_ :Any = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
A_ :Tuple = '''
Calculates how good predictions are given some references, using certain scores
Args:
    predictions: list of candidates to evaluate. Each candidate should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time in seconds a candidate program may run before it is
        marked as failed (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
A_ :List[str] = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
A_ :Tuple = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
def __lowercase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=[1, 10, 100] , lowerCamelCase__=4 , lowerCamelCase__=3.0 ):
"""simple docstring"""
if os.getenv('HF_ALLOW_CODE_EVAL' , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError('This metric is currently not supported on Windows.' )
with ThreadPoolExecutor(max_workers=lowerCamelCase__ ) as executor:
__UpperCamelCase : List[str] =[]
__UpperCamelCase : Any =Counter()
__UpperCamelCase : List[Any] =0
__UpperCamelCase : int =defaultdict(lowerCamelCase__ )
for task_id, (candidates, test_case) in enumerate(zip(lowerCamelCase__ , lowerCamelCase__ ) ):
for candidate in candidates:
__UpperCamelCase : str =candidate + '\n' + test_case
__UpperCamelCase : Any =(test_program, timeout, task_id, completion_id[task_id])
__UpperCamelCase : Optional[Any] =executor.submit(lowerCamelCase__ , *lowerCamelCase__ )
futures.append(lowerCamelCase__ )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(lowerCamelCase__ ):
__UpperCamelCase : str =future.result()
results[result["task_id"]].append((result['completion_id'], result) )
__UpperCamelCase , __UpperCamelCase : int =[], []
for result in results.values():
result.sort()
__UpperCamelCase : str =[r[1]['passed'] for r in result]
total.append(len(lowerCamelCase__ ) )
correct.append(sum(lowerCamelCase__ ) )
__UpperCamelCase : Optional[int] =np.array(lowerCamelCase__ )
__UpperCamelCase : List[str] =np.array(lowerCamelCase__ )
__UpperCamelCase : Union[str, Any] =k
__UpperCamelCase : List[Any] ={f'pass@{k}': estimate_pass_at_k(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def A ( a_ ,a_ ,a_ ) -> Optional[int]:
def estimator(a_ ,a_ ,a_ ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 ,n + 1 ) )
if isinstance(a_ ,a_ ):
__UpperCamelCase : Optional[int] =itertools.repeat(a_ ,len(a_ ) )
else:
assert len(a_ ) == len(a_ )
__UpperCamelCase : List[Any] =iter(a_ )
return np.array([estimator(int(a_ ) ,int(a_ ) ,a_ ) for n, c in zip(a_ ,a_ )] )
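# A hedged worked example of the unbiased pass@k estimator above (n = number of
# generated samples, c = number that pass, k = budget, as in the cited paper):
# with n=5, c=2, k=2,
#   pass@2 = 1 - C(n-c, k) / C(n, k) = 1 - C(3, 2) / C(5, 2) = 1 - 3/10 = 0.7
# which matches 1 - prod(1 - k/j for j in [4, 5]) = 1 - 0.5 * 0.6 = 0.7.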
| 71
| 1
|
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__lowerCamelCase : Optional[Any] = open # noqa: we just need to have a builtin inside this module to test it properly
| 354
|
from math import isqrt
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int ) -> list[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , __UpperCamelCase , __UpperCamelCase ):
SCREAMING_SNAKE_CASE__ = False
return [i for i in range(2 , __UpperCamelCase ) if is_prime[i]]
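# Hedged example for the sieve above (called `calculate_prime_numbers` below):
# with max_number=30 it returns [2, 3, 5, 7, 11, 13, 17, 19, 23, 29].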
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : int = 10**8 ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = calculate_prime_numbers(max_number // 2 )
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = 0
SCREAMING_SNAKE_CASE__ = len(__UpperCamelCase ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 204
| 0
|
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def __snake_case ( _lowerCAmelCase : int ) -> int:
# A local function to see if a dot lands in the circle.
def is_in_circle(_lowerCAmelCase : float , _lowerCAmelCase : float ) -> bool:
A_ : str = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
A_ : Optional[int] = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(_lowerCAmelCase ) )
# The ratio of the area for circle to square is pi/4.
A_ : int = proportion * 4
print(f"The estimated value of pi is {pi_estimate}" )
print(f"The numpy value of pi is {pi}" )
print(f"The total error is {abs(pi - pi_estimate )}" )
def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : Callable[[float], float] , _lowerCAmelCase : float = 0.0 , _lowerCAmelCase : float = 1.0 , ) -> float:
return mean(
function_to_integrate(uniform(_lowerCAmelCase , _lowerCAmelCase ) ) for _ in range(_lowerCAmelCase ) ) * (max_value - min_value)
def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : float = 0.0 , _lowerCAmelCase : float = 1.0 ) -> None:
def identity_function(_lowerCAmelCase : float ) -> float:
return x
A_ : Any = area_under_curve_estimator(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
A_ : Tuple = (max_value * max_value - min_value * min_value) / 2
print("******************" )
print(f"Estimating area under y=x where x varies from {min_value} to {max_value}" )
print(f"Estimated value is {estimated_value}" )
print(f"Expected value is {expected_value}" )
print(f"Total error is {abs(estimated_value - expected_value )}" )
print("******************" )
def __snake_case ( _lowerCAmelCase : int ) -> None:
def function_to_integrate(_lowerCAmelCase : float ) -> float:
return sqrt(4.0 - x * x )
A_ : List[str] = area_under_curve_estimator(
_lowerCAmelCase , _lowerCAmelCase , 0.0 , 2.0 )
print("******************" )
print("Estimating pi using area_under_curve_estimator" )
print(f"Estimated value is {estimated_value}" )
print(f"Expected value is {pi}" )
print(f"Total error is {abs(estimated_value - pi )}" )
print("******************" )
if __name__ == "__main__":
import doctest
doctest.testmod()
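# Hedged accuracy note for the pi estimator above: the hit proportion p ~= pi/4
# has standard error sqrt(p*(1-p)/n), so with n = 10**6 samples the resulting
# estimate of pi is typically off by about 4 * sqrt(0.785 * 0.215 / 1e6) ~= 1.6e-3.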
| 300
|
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __magic_name__ :
"""simple docstring"""
def __init__( self :Dict , snake_case :Optional[int] , snake_case :Tuple=13 , snake_case :List[Any]=30 , snake_case :Union[str, Any]=2 , snake_case :List[Any]=3 , snake_case :Tuple=True , snake_case :Dict=True , snake_case :Dict=32 , snake_case :List[str]=5 , snake_case :Optional[Any]=4 , snake_case :Any=37 , snake_case :Dict="gelu" , snake_case :List[str]=0.1 , snake_case :str=0.1 , snake_case :Tuple=10 , snake_case :str=0.02 , snake_case :Optional[Any]=None , ):
'''simple docstring'''
A_ : Tuple = parent
A_ : int = batch_size
A_ : List[str] = image_size
A_ : List[Any] = patch_size
A_ : Optional[Any] = num_channels
A_ : List[Any] = is_training
A_ : Tuple = use_labels
A_ : Union[str, Any] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Any = num_attention_heads
A_ : List[str] = intermediate_size
A_ : Optional[int] = hidden_act
A_ : List[str] = hidden_dropout_prob
A_ : str = attention_probs_dropout_prob
A_ : Any = type_sequence_label_size
A_ : List[str] = initializer_range
A_ : Dict = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A_ : Optional[int] = (image_size // patch_size) ** 2
A_ : List[str] = num_patches + 1
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Tuple = None
if self.use_labels:
A_ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ : Dict = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self :List[Any] , snake_case :List[Any] , snake_case :str , snake_case :Tuple ):
'''simple docstring'''
A_ : Optional[Any] = ViTMSNModel(config=snake_case )
model.to(snake_case )
model.eval()
A_ : int = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self :int , snake_case :Optional[int] , snake_case :List[str] , snake_case :List[str] ):
'''simple docstring'''
A_ : Dict = self.type_sequence_label_size
A_ : Tuple = ViTMSNForImageClassification(snake_case )
model.to(snake_case )
model.eval()
A_ : Union[str, Any] = model(snake_case , labels=snake_case )
print("Pixel and labels shape: {pixel_values.shape}, {labels.shape}" )
print("Labels: {labels}" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A_ : Union[str, Any] = 1
A_ : int = ViTMSNForImageClassification(snake_case )
model.to(snake_case )
model.eval()
A_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : Optional[Any] = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
A_ : List[str] = self.prepare_config_and_inputs()
A_ , A_ , A_ : Optional[int] = config_and_inputs
A_ : Dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__UpperCamelCase = (
{'''feature-extraction''': ViTMSNModel, '''image-classification''': ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = ViTMSNModelTester(self )
A_ : str = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMSN does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ , A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[int] = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ , A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(snake_case )
A_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : List[str] = [*signature.parameters.keys()]
A_ : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
A_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
@slow
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Optional[Any] = ViTMSNModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def __snake_case ( ) -> Optional[Any]:
A_ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE ( self :str ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("facebook/vit-msn-small" ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
torch.manual_seed(2 )
A_ : Any = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small" ).to(snake_case )
A_ : List[str] = self.default_image_processor
A_ : int = prepare_img()
A_ : List[str] = image_processor(images=snake_case , return_tensors="pt" ).to(snake_case )
# forward pass
with torch.no_grad():
A_ : Optional[int] = model(**snake_case )
# verify the logits
A_ : List[Any] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , snake_case )
A_ : int = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1e-4 ) )
| 300
| 1
|
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowerCAmelCase__ : Optional[Any] = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def __UpperCamelCase ( _UpperCAmelCase ):
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
__UpperCAmelCase : List[str] = list(s_dict.keys() )
for key in keys:
__UpperCAmelCase : int = R".*/layers_(\d+)"
__UpperCAmelCase : List[str] = key
if re.match(_UpperCAmelCase, _UpperCAmelCase ):
__UpperCAmelCase : Optional[int] = re.sub(R"layers_(\d+)", R"block/\1/layer", _UpperCAmelCase )
__UpperCAmelCase : Any = R"(encoder|decoder)\/"
if re.match(_UpperCAmelCase, _UpperCAmelCase ):
__UpperCAmelCase : List[Any] = re.match(_UpperCAmelCase, _UpperCAmelCase ).groups()
if groups[0] == "encoder":
__UpperCAmelCase : Optional[Any] = re.sub(R"/mlp/", R"/1/mlp/", _UpperCAmelCase )
__UpperCAmelCase : List[Any] = re.sub(R"/pre_mlp_layer_norm/", R"/1/layer_norm/", _UpperCAmelCase )
elif groups[0] == "decoder":
__UpperCAmelCase : List[Any] = re.sub(R"/mlp/", R"/2/mlp/", _UpperCAmelCase )
__UpperCAmelCase : Any = re.sub(R"/pre_mlp_layer_norm/", R"/2/layer_norm/", _UpperCAmelCase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
__UpperCAmelCase : List[str] = new_key.replace(_UpperCAmelCase, _UpperCAmelCase )
print(F"{key} -> {new_key}" )
__UpperCAmelCase : Any = s_dict.pop(_UpperCAmelCase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__UpperCAmelCase : Tuple = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__UpperCAmelCase : Optional[Any] = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
__UpperCAmelCase : Any = s_dict[key].shape[0]
__UpperCAmelCase : str = s_dict[key]
for idx in range(_UpperCAmelCase ):
__UpperCAmelCase : Optional[Any] = expert_weihts[idx]
print(F"{key} -> {key.replace('expert/', 'nested fstring' )}" )
s_dict.pop(_UpperCAmelCase )
return s_dict
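# Hedged trace of the renaming above on one hypothetical T5X key:
#   "encoder/layers_3/mlp/wi/kernel"
#   -> "encoder/block/3/layer/mlp/wi/kernel"      (layers_{x} -> block/{x}/layer)
#   -> "encoder/block/3/layer/1/mlp/wi/kernel"    (encoder mlp sits in sub-layer 1)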
lowerCAmelCase__ : Optional[Any] = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase ):
    # Convert a Google-style config to the Hugging Face format
import regex as re
with open(_UpperCAmelCase, "r" ) as f:
__UpperCAmelCase : List[Any] = f.read()
__UpperCAmelCase : Union[str, Any] = re.findall(R"(.*) = ([0-9.]*)", _UpperCAmelCase )
__UpperCAmelCase : Dict = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
__UpperCAmelCase : Tuple = float(_UpperCAmelCase ) if "." in value else int(_UpperCAmelCase )
__UpperCAmelCase : str = re.findall(R"(.*activations) = \(\'(.*)\',\)", _UpperCAmelCase )[0]
__UpperCAmelCase : int = str(activation[1] )
__UpperCAmelCase : int = num_experts
__UpperCAmelCase : List[str] = SwitchTransformersConfig(**_UpperCAmelCase )
return config
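# Hedged example of the gin parsing above: a line such as "NUM_HEADS = 12" is
# captured by r"(.*) = ([0-9.]*)" and, through GIN_TO_CONFIG_MAPPING, becomes
# the num_heads=12 keyword of SwitchTransformersConfig.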
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None, _UpperCAmelCase="./", _UpperCAmelCase=8 ):
# Initialise PyTorch model
print(F"Loading flax weights from : {flax_checkpoint_path}" )
__UpperCAmelCase : Dict = checkpoints.load_tax_checkpoint(_UpperCAmelCase )
if gin_file is not None:
__UpperCAmelCase : int = convert_gin_to_config(_UpperCAmelCase, _UpperCAmelCase )
else:
__UpperCAmelCase : int = SwitchTransformersConfig.from_pretrained(_UpperCAmelCase )
__UpperCAmelCase : Any = SwitchTransformersForConditionalGeneration(_UpperCAmelCase )
__UpperCAmelCase : str = flax_params["target"]
__UpperCAmelCase : Any = flatten_dict(_UpperCAmelCase, sep="/" )
__UpperCAmelCase : Optional[Any] = rename_keys(_UpperCAmelCase )
__UpperCAmelCase : Any = unflatten_dict(_UpperCAmelCase, sep="/" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(_UpperCAmelCase, _UpperCAmelCase )
print(F"Save PyTorch model to {pytorch_dump_path}" )
pt_model.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
lowerCAmelCase__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
lowerCAmelCase__ : int = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 368
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class lowerCAmelCase__ ( _snake_case):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 42
class lowerCAmelCase__ ( _snake_case , _snake_case):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = True
@register_to_config
def __init__( self , __lowerCamelCase = 3 , __lowerCamelCase = 3 , __lowerCamelCase = ("DownEncoderBlock2D",) , __lowerCamelCase = ("UpDecoderBlock2D",) , __lowerCamelCase = (6_4,) , __lowerCamelCase = 1 , __lowerCamelCase = "silu" , __lowerCamelCase = 4 , __lowerCamelCase = 3_2 , __lowerCamelCase = 3_2 , __lowerCamelCase = 0.1_8_2_1_5 , ) -> Optional[int]:
super().__init__()
# pass init params to Encoder
_A : str = Encoder(
in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , down_block_types=UpperCamelCase__ , block_out_channels=UpperCamelCase__ , layers_per_block=UpperCamelCase__ , act_fn=UpperCamelCase__ , norm_num_groups=UpperCamelCase__ , double_z=UpperCamelCase__ , )
# pass init params to Decoder
_A : str = Decoder(
in_channels=UpperCamelCase__ , out_channels=UpperCamelCase__ , up_block_types=UpperCamelCase__ , block_out_channels=UpperCamelCase__ , layers_per_block=UpperCamelCase__ , norm_num_groups=UpperCamelCase__ , act_fn=UpperCamelCase__ , )
_A : int = nn.Convad(2 * latent_channels , 2 * latent_channels , 1)
_A : List[str] = nn.Convad(UpperCamelCase__ , UpperCamelCase__ , 1)
_A : List[Any] = False
_A : Tuple = False
# only relevant if vae tiling is enabled
_A : Tuple = self.config.sample_size
_A : Any = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple))
else self.config.sample_size
)
_A : Any = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
_A : int = 0.2_5
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase=False) -> Any:
if isinstance(UpperCamelCase__ , (Encoder, Decoder)):
_A : int = value
def _lowerCamelCase ( self , __lowerCamelCase = True) -> List[str]:
_A : str = use_tiling
def _lowerCamelCase ( self) -> List[Any]:
self.enable_tiling(UpperCamelCase__)
def _lowerCamelCase ( self) -> List[str]:
_A : Any = True
def _lowerCamelCase ( self) -> Optional[Any]:
_A : Union[str, Any] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def _lowerCamelCase ( self) -> Dict[str, AttentionProcessor]:
_A : List[Any] = {}
def fn_recursive_add_processors(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase):
if hasattr(UpperCamelCase__ , "set_processor"):
_A : List[Any] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(F"{name}.{sub_name}" , UpperCamelCase__ , UpperCamelCase__)
return processors
for name, module in self.named_children():
fn_recursive_add_processors(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
return processors
def _lowerCamelCase ( self , __lowerCamelCase) -> Dict:
_A : Tuple = len(self.attn_processors.keys())
if isinstance(UpperCamelCase__ , UpperCamelCase__) and len(UpperCamelCase__) != count:
raise ValueError(
F"A dict of processors was passed, but the number of processors {len(UpperCamelCase__)} does not match the"
F" number of attention layers: {count}. Please make sure to pass {count} processor classes.")
def fn_recursive_attn_processor(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase):
if hasattr(UpperCamelCase__ , "set_processor"):
if not isinstance(UpperCamelCase__ , UpperCamelCase__):
module.set_processor(UpperCamelCase__)
else:
module.set_processor(processor.pop(F"{name}.processor"))
for sub_name, child in module.named_children():
fn_recursive_attn_processor(F"{name}.{sub_name}" , UpperCamelCase__ , UpperCamelCase__)
for name, module in self.named_children():
fn_recursive_attn_processor(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__)
def _lowerCamelCase ( self) -> Any:
self.set_attn_processor(AttnProcessor())
@apply_forward_hook
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = True) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(UpperCamelCase__ , return_dict=UpperCamelCase__)
if self.use_slicing and x.shape[0] > 1:
_A : Optional[Any] = [self.encoder(UpperCamelCase__) for x_slice in x.split(1)]
_A : List[Any] = torch.cat(UpperCamelCase__)
else:
_A : str = self.encoder(UpperCamelCase__)
_A : str = self.quant_conv(UpperCamelCase__)
_A : Optional[int] = DiagonalGaussianDistribution(UpperCamelCase__)
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=UpperCamelCase__)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = True) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(UpperCamelCase__ , return_dict=UpperCamelCase__)
_A : Dict = self.post_quant_conv(UpperCamelCase__)
_A : int = self.decoder(UpperCamelCase__)
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase__)
@apply_forward_hook
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = True) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
_A : Tuple = [self._decode(UpperCamelCase__).sample for z_slice in z.split(1)]
_A : Optional[int] = torch.cat(UpperCamelCase__)
else:
_A : List[Any] = self._decode(UpperCamelCase__).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=UpperCamelCase__)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> int:
_A : Tuple = min(a.shape[2] , b.shape[2] , UpperCamelCase__)
for y in range(UpperCamelCase__):
_A : Union[str, Any] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase) -> Dict:
_A : Optional[Any] = min(a.shape[3] , b.shape[3] , UpperCamelCase__)
for x in range(UpperCamelCase__):
_A : Tuple = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
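    # Hedged worked example of the horizontal blend above (originally `blend_h`)
    # with blend_extent=4: output column x mixes the tiles as
    # (1 - x/4) * a + (x/4) * b, so columns 0..3 keep weights 1.0, 0.75, 0.5,
    # 0.25 of tile `a`, giving a seamless left-to-right cross-fade.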
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = True) -> AutoencoderKLOutput:
_A : Optional[Any] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
_A : str = int(self.tile_latent_min_size * self.tile_overlap_factor)
_A : List[str] = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
_A : Any = []
for i in range(0 , x.shape[2] , UpperCamelCase__):
_A : Any = []
for j in range(0 , x.shape[3] , UpperCamelCase__):
_A : Tuple = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
_A : Optional[Any] = self.encoder(UpperCamelCase__)
_A : int = self.quant_conv(UpperCamelCase__)
row.append(UpperCamelCase__)
rows.append(UpperCamelCase__)
_A : str = []
for i, row in enumerate(UpperCamelCase__):
_A : str = []
for j, tile in enumerate(UpperCamelCase__):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_A : int = self.blend_v(rows[i - 1][j] , UpperCamelCase__ , UpperCamelCase__)
if j > 0:
_A : Dict = self.blend_h(row[j - 1] , UpperCamelCase__ , UpperCamelCase__)
result_row.append(tile[:, :, :row_limit, :row_limit])
result_rows.append(torch.cat(UpperCamelCase__ , dim=3))
_A : List[str] = torch.cat(UpperCamelCase__ , dim=2)
_A : Union[str, Any] = DiagonalGaussianDistribution(UpperCamelCase__)
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=UpperCamelCase__)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = True) -> Union[DecoderOutput, torch.FloatTensor]:
_A : Dict = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
_A : int = int(self.tile_sample_min_size * self.tile_overlap_factor)
_A : Any = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
_A : Dict = []
for i in range(0 , z.shape[2] , UpperCamelCase__):
_A : List[Any] = []
for j in range(0 , z.shape[3] , UpperCamelCase__):
_A : int = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
_A : int = self.post_quant_conv(UpperCamelCase__)
_A : str = self.decoder(UpperCamelCase__)
row.append(UpperCamelCase__)
rows.append(UpperCamelCase__)
_A : str = []
for i, row in enumerate(UpperCamelCase__):
_A : Optional[Any] = []
for j, tile in enumerate(UpperCamelCase__):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
_A : List[Any] = self.blend_v(rows[i - 1][j] , UpperCamelCase__ , UpperCamelCase__)
if j > 0:
_A : Any = self.blend_h(row[j - 1] , UpperCamelCase__ , UpperCamelCase__)
result_row.append(tile[:, :, :row_limit, :row_limit])
result_rows.append(torch.cat(UpperCamelCase__ , dim=3))
_A : Any = torch.cat(UpperCamelCase__ , dim=2)
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase__)
def _lowerCamelCase ( self , __lowerCamelCase , __lowerCamelCase = False , __lowerCamelCase = True , __lowerCamelCase = None , ) -> Union[DecoderOutput, torch.FloatTensor]:
_A : List[str] = sample
_A : str = self.encode(UpperCamelCase__).latent_dist
if sample_posterior:
_A : List[str] = posterior.sample(generator=UpperCamelCase__)
else:
_A : Union[str, Any] = posterior.mode()
_A : int = self.decode(UpperCamelCase__).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=UpperCamelCase__)
| 11
|
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> list[list[float]]:
A_ = []
for data in source_data:
for i, el in enumerate(UpperCAmelCase__ ):
if len(UpperCAmelCase__ ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(UpperCAmelCase__ ) )
return data_lists
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> list[list[float]]:
A_ = []
for dlist, weight in zip(UpperCAmelCase__, UpperCAmelCase__ ):
A_ = min(UpperCAmelCase__ )
A_ = max(UpperCAmelCase__ )
A_ = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
A_ = F'''Invalid weight of {weight:f} provided'''
raise ValueError(UpperCAmelCase__ )
score_lists.append(UpperCAmelCase__ )
return score_lists
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> list[float]:
A_ = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(UpperCAmelCase__ ):
A_ = final_scores[j] + ele
return final_scores
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> list[list[float]]:
A_ = get_data(UpperCAmelCase__ )
A_ = calculate_each_score(UpperCAmelCase__, UpperCAmelCase__ )
A_ = generate_final_scores(UpperCAmelCase__ )
# append scores to source data
for i, ele in enumerate(UpperCAmelCase__ ):
source_data[i].append(UpperCAmelCase__ )
return source_data
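# Hedged usage sketch of the scoring pipeline above (the four obfuscated defs
# correspond to get_data, calculate_each_score, generate_final_scores and the
# top-level driver of the original "procentual proximity" algorithm):
#   source_data = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
#   weights     = [0, 0, 1]   # 0: lower is better, 1: higher is better
# Each column is min-max normalised, per-column scores are summed row-wise, and
# the total score is appended to each row of source_data.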
| 162
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase__ = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
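# Hedged note on the _LazyModule pattern above: importing the package only
# registers _import_structure; an attribute access such as
#   from transformers.models.lilt import LiltModel
# performs the torch-dependent import lazily, so environments without torch can
# still import the package and read its configuration symbols.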
| 358
|
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class lowerCamelCase_ ( unittest.TestCase ):
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = 0
@slow
def lowercase_ ( self : Dict ):
'''simple docstring'''
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(_A ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(_A )
self.assertIsNotNone(_A )
self.assertIsInstance(_A , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(_A ) , 0 )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 20 )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = AutoConfig.from_pretrained(_A )
self.assertIsInstance(_A , _A )
# Check that tokenizer_type ≠ model_type
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(_A , config=_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 12 )
def lowercase_ ( self : str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_A , '''vocab.txt''' ) )
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(_A , tokenizer_type='''bert''' , use_fast=_A )
self.assertIsInstance(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_A , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_A , '''merges.txt''' ) )
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A , tokenizer_type='''gpt2''' , use_fast=_A )
self.assertIsInstance(_A , _A )
@require_tokenizers
def lowercase_ ( self : str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.txt''' , os.path.join(_A , '''vocab.txt''' ) )
UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(_A , tokenizer_type='''bert''' )
self.assertIsInstance(_A , _A )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('''./tests/fixtures/vocab.json''' , os.path.join(_A , '''vocab.json''' ) )
shutil.copy('''./tests/fixtures/merges.txt''' , os.path.join(_A , '''merges.txt''' ) )
UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(_A , tokenizer_type='''gpt2''' )
self.assertIsInstance(_A , _A )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
with pytest.raises(_A ):
AutoTokenizer.from_pretrained('''./''' , tokenizer_type='''xxx''' )
@require_tokenizers
def lowercase_ ( self : int ):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
UpperCAmelCase__ : Optional[int] = tokenizer_class.from_pretrained('''wietsedv/bert-base-dutch-cased''' )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
if isinstance(_A , _A ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _A )
else:
self.assertEqual(tokenizer.do_lower_case , _A )
self.assertEqual(tokenizer.model_max_length , 512 )
@require_tokenizers
def lowercase_ ( self : List[str] ):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_A , '''julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier''' , ):
UpperCAmelCase__ : Dict = tokenizer_class.from_pretrained('''julien-c/herlolip-not-exists''' )
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = TOKENIZER_MAPPING.values()
UpperCAmelCase__ : Any = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_A )
@require_tokenizers
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' , use_fast=_A ) , _A )
self.assertIsInstance(AutoTokenizer.from_pretrained('''bert-base-cased''' ) , _A )
@require_tokenizers
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained('''distilbert-base-uncased''' , do_lower_case=_A )
UpperCAmelCase__ : Any = '''Hello, world. How are you?'''
UpperCAmelCase__ : Dict = tokenizer.tokenize(_A )
self.assertEqual('''[UNK]''' , tokens[0] )
UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained('''microsoft/mpnet-base''' , do_lower_case=_A )
UpperCAmelCase__ : Union[str, Any] = tokenizer.tokenize(_A )
self.assertEqual('''[UNK]''' , tokens[0] )
@require_tokenizers
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = AutoTokenizer.from_pretrained('''robot-test/dummy-tokenizer-fast-with-model-config''' )
self.assertEqual(type(_A ) , _A )
self.assertEqual(tokenizer.model_max_length , 512 )
self.assertEqual(tokenizer.vocab_size , 30_000 )
self.assertEqual(tokenizer.unk_token , '''[UNK]''' )
self.assertEqual(tokenizer.padding_side , '''right''' )
self.assertEqual(tokenizer.truncation_side , '''right''' )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , (BertTokenizer, BertTokenizerFast) )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , tokenizer.__class__ )
self.assertEqual(tokenizera.vocab_size , 12 )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''ctrl''' )
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_A , _A )
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : str = get_tokenizer_config('''bert-base-cased''' )
UpperCAmelCase__ : Optional[int] = config.pop('''_commit_hash''' , _A )
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_A , {'''do_lower_case''': False} )
# This model does not have a tokenizer_config so we get back an empty dict.
UpperCAmelCase__ : Tuple = get_tokenizer_config(_A )
self.assertDictEqual(_A , {} )
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : List[Any] = get_tokenizer_config(_A )
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['''tokenizer_class'''] , '''BertTokenizer''' )
def lowercase_ ( self : Dict ):
'''simple docstring'''
try:
AutoConfig.register('''custom''' , _A )
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
UpperCAmelCase__ : Optional[int] = CustomTokenizer.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , _A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def lowercase_ ( self : Any ):
'''simple docstring'''
try:
AutoConfig.register('''custom''' , _A )
# Can register in two steps
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None) )
AutoTokenizer.register(_A , fast_tokenizer_class=_A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_A , slow_tokenizer_class=_A , fast_tokenizer_class=_A )
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast) )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoTokenizer.register(_A , fast_tokenizer_class=_A )
            # We pass through a BERT fast tokenizer because there is no slow-to-fast
            # converter for our new tokenizer, and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase__ : Any = BertTokenizerFast.from_pretrained(_A )
bert_tokenizer.save_pretrained(_A )
UpperCAmelCase__ : Optional[int] = CustomTokenizerFast.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(_A , use_fast=_A )
self.assertIsInstance(_A , _A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
with self.assertRaises(_A ):
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A ):
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
self.assertTrue(tokenizer.special_attribute_present )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_A )
UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(_A , trust_remote_code=_A , use_fast=_A )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(reloaded_tokenizer.special_attribute_present )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertEqual(reloaded_tokenizer.__class__.__name__ , '''NewTokenizer''' )
@require_tokenizers
def lowercase_ ( self : int ):
'''simple docstring'''
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = False
class lowerCamelCase_ ( __a ):
lowerCAmelCase__ = NewTokenizer
lowerCAmelCase__ = False
try:
AutoConfig.register('''custom''' , _A )
AutoTokenizer.register(_A , slow_tokenizer_class=_A )
AutoTokenizer.register(_A , fast_tokenizer_class=_A )
# If remote code is not set, the default is to use local
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/test_dynamic_tokenizer''' , use_fast=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote code is disabled, we load the local one.
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertFalse(tokenizer.special_attribute_present )
UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertFalse(tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub
UpperCAmelCase__ : Dict = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
self.assertTrue(tokenizer.special_attribute_present )
UpperCAmelCase__ : Any = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer''' , trust_remote_code=_A , use_fast=_A )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
self.assertTrue(tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A )
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
UpperCAmelCase__ : Optional[Any] = AutoTokenizer.from_pretrained(
'''hf-internal-testing/test_dynamic_tokenizer_legacy''' , trust_remote_code=_A , use_fast=_A )
self.assertTrue(tokenizer.special_attribute_present )
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , '''bert-base is not a local folder and is not a valid model identifier''' ):
UpperCAmelCase__ : Tuple = AutoTokenizer.from_pretrained('''bert-base''' )
def lowercase_ ( self : Dict ):
'''simple docstring'''
with self.assertRaisesRegex(
_A , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained(_A , revision='''aaaaaa''' )
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
UpperCAmelCase__ : Optional[int] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
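        # Editor's note: the assertions above verify `from_pretrained` caching:
        # once the tokenizer is cached, a reload issues a single HEAD request
        # (revision check) and no other network traffic.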
| 299
| 0
|
'''simple docstring'''
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def a__ ( a__ ):
"""simple docstring"""
return "".join(sorted(a__ ) )
def a__ ( a__ ):
"""simple docstring"""
return word_by_signature[signature(a__ )]
UpperCAmelCase : str = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
UpperCAmelCase : Optional[Any] = sorted({word.strip().lower() for word in data.splitlines()})
UpperCAmelCase : Optional[Any] = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
UpperCAmelCase : List[Any] = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
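# ---------------------------------------------------------------------------
# Editor's addition: a self-contained check of the signature/anagram idea
# above, using a tiny in-memory word list instead of words.txt.
import collections

def _signature(word: str) -> str:
    return "".join(sorted(word))

_tiny_words = ["elvis", "lives", "levis", "python"]
_tiny_index = collections.defaultdict(list)
for _w in _tiny_words:
    _tiny_index[_signature(_w)].append(_w)

# All three anagrams share the signature "eilsv"; "python" stands alone.
assert _tiny_index["eilsv"] == ["elvis", "lives", "levis"]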
| 267
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
# fmt: off
__SCREAMING_SNAKE_CASE = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
__SCREAMING_SNAKE_CASE = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
__SCREAMING_SNAKE_CASE = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
__SCREAMING_SNAKE_CASE = {"""unk_token""": """<unk>"""}
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname , __SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> str:
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple , **__SCREAMING_SNAKE_CASE : Any ) -> int:
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[Any] , **__SCREAMING_SNAKE_CASE : Optional[int] ) -> List[str]:
"""simple docstring"""
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Tuple ) -> str:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(__SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
__SCREAMING_SNAKE_CASE = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="""np""" )
__SCREAMING_SNAKE_CASE = processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ ( self : List[Any] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = processor(text=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tokenizer(__SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCAmelCase__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(__SCREAMING_SNAKE_CASE ):
processor()
def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE = processor.batch_decode(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : int ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = CLIPProcessor(tokenizer=__SCREAMING_SNAKE_CASE , image_processor=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=__SCREAMING_SNAKE_CASE , images=__SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
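# ---------------------------------------------------------------------------
# Editor's sketch (the checkpoint name is an assumption): outside of tests,
# the processor bundles the tokenizer and image processor behind one call:
#
#     from transformers import CLIPProcessor
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     batch = processor(text=["a photo of a cat"], images=image, return_tensors="pt")
#     # -> input_ids, attention_mask and pixel_values, as asserted above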
| 267
| 1
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class A :
"""simple docstring"""
def __init__( self : str,lowercase_ : int,lowercase_ : Optional[Any]=1_3,lowercase_ : Optional[int]=7,lowercase_ : str=True,lowercase_ : Optional[int]=True,lowercase_ : Dict=True,lowercase_ : str=True,lowercase_ : Union[str, Any]=9_9,lowercase_ : Union[str, Any]=3_2,lowercase_ : Optional[Any]=2,lowercase_ : int=4,lowercase_ : str=3_7,lowercase_ : int="gelu",lowercase_ : Dict=0.1,lowercase_ : Union[str, Any]=0.1,lowercase_ : Optional[int]=5_1_2,lowercase_ : Union[str, Any]=1_6,lowercase_ : Any=2,lowercase_ : Dict=0.02,lowercase_ : int=3,lowercase_ : Dict=4,lowercase_ : str=None,lowercase_ : Optional[int]=0,)-> Optional[int]:
'''simple docstring'''
A__ = parent
A__ = batch_size
A__ = seq_length
A__ = is_training
A__ = use_input_mask
A__ = use_token_type_ids
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = type_sequence_label_size
A__ = initializer_range
A__ = num_labels
A__ = num_choices
A__ = scope
A__ = projection_dim
def snake_case__ ( self : List[str] )-> Tuple:
'''simple docstring'''
A__ = ids_tensor([self.batch_size, self.seq_length],self.vocab_size )
A__ = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
A__ = random_attention_mask([self.batch_size, self.seq_length] )
A__ = None
if self.use_token_type_ids:
A__ = ids_tensor([self.batch_size, self.seq_length],self.type_vocab_size )
A__ = None
A__ = None
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size],self.type_sequence_label_size )
A__ = ids_tensor([self.batch_size, self.seq_length],self.num_labels )
A__ = ids_tensor([self.batch_size],self.num_choices )
A__ = BertConfig(
vocab_size=self.vocab_size,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,max_position_embeddings=self.max_position_embeddings,type_vocab_size=self.type_vocab_size,is_decoder=lowercase_,initializer_range=self.initializer_range,)
A__ = DPRConfig(projection_dim=self.projection_dim,**config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def snake_case__ ( self : List[Any],lowercase_ : Optional[int],lowercase_ : Optional[Any],lowercase_ : str,lowercase_ : int,lowercase_ : Tuple,lowercase_ : List[Any],lowercase_ : Dict )-> List[str]:
'''simple docstring'''
A__ = TFDPRContextEncoder(config=lowercase_ )
A__ = model(lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_ )
A__ = model(lowercase_,token_type_ids=lowercase_ )
A__ = model(lowercase_ )
self.parent.assertEqual(result.pooler_output.shape,(self.batch_size, self.projection_dim or self.hidden_size) )
def snake_case__ ( self : Optional[int],lowercase_ : Optional[Any],lowercase_ : Tuple,lowercase_ : Any,lowercase_ : Any,lowercase_ : Union[str, Any],lowercase_ : Union[str, Any],lowercase_ : str )-> str:
'''simple docstring'''
A__ = TFDPRQuestionEncoder(config=lowercase_ )
A__ = model(lowercase_,attention_mask=lowercase_,token_type_ids=lowercase_ )
A__ = model(lowercase_,token_type_ids=lowercase_ )
A__ = model(lowercase_ )
self.parent.assertEqual(result.pooler_output.shape,(self.batch_size, self.projection_dim or self.hidden_size) )
def snake_case__ ( self : int,lowercase_ : List[Any],lowercase_ : Optional[Any],lowercase_ : Any,lowercase_ : Any,lowercase_ : List[Any],lowercase_ : Union[str, Any],lowercase_ : Tuple )-> List[Any]:
'''simple docstring'''
A__ = TFDPRReader(config=lowercase_ )
A__ = model(lowercase_,attention_mask=lowercase_ )
self.parent.assertEqual(result.start_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape,(self.batch_size,) )
def snake_case__ ( self : int )-> Union[str, Any]:
'''simple docstring'''
A__ = self.prepare_config_and_inputs()
        A__ , A__ , A__ , A__ , A__ , A__ , A__ = config_and_inputs
A__ = {'input_ids': input_ids}
return config, inputs_dict
@require_tf
class A ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase = (
(
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
if is_tf_available()
else ()
)
lowerCamelCase = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
lowerCamelCase = False
def snake_case__ ( self : str )-> List[Any]:
'''simple docstring'''
A__ = TFDPRModelTester(self )
A__ = ConfigTester(self,config_class=lowercase_,hidden_size=3_7 )
def snake_case__ ( self : Dict )-> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self : Tuple )-> Tuple:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*lowercase_ )
def snake_case__ ( self : Optional[int] )-> Union[str, Any]:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*lowercase_ )
def snake_case__ ( self : List[Any] )-> int:
'''simple docstring'''
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*lowercase_ )
@slow
def snake_case__ ( self : Any )-> Any:
'''simple docstring'''
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFDPRContextEncoder.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFDPRContextEncoder.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFDPRQuestionEncoder.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = TFDPRReader.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_tf
class A ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case__ ( self : int )-> Dict:
'''simple docstring'''
A__ = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base' )
A__ = tf.constant(
[[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP]
A__ = model(lowercase_ )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
A__ = tf.constant(
[
[
0.03_236_253,
0.12_753_335,
0.16_818_509,
0.00_279_786,
0.3_896_933,
0.24_264_945,
0.2_178_971,
-0.02_335_227,
-0.08_481_959,
-0.14_324_117,
]
] )
self.assertTrue(numpy.allclose(output[:, :1_0].numpy(),expected_slice.numpy(),atol=1E-4 ) )
| 282
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class A ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self : List[Any],lowercase_ : str )-> List[Any]:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'],model_result['ss'] ):
A__ = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(lowercase_ )
def snake_case__ ( self : Dict )-> List[str]:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : Dict )-> List[str]:
'''simple docstring'''
A__ = 'sgugger/tiny-distilbert-classification'
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,only_pretrain_model=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : List[Any] )-> Any:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,torchscript=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == 'cpu','Can\'t do half precision' )
def snake_case__ ( self : Any )-> Dict:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,fpaa=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : Any )-> Optional[Any]:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = AutoConfig.from_pretrained(lowercase_ )
# set architectures equal to `None`
A__ = None
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_,configs=[config] )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : Union[str, Any] )-> int:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu','Can\'t do half precision' )
def snake_case__ ( self : List[Any] )-> Dict:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],fpaa=lowercase_,multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case__ ( self : int )-> Optional[int]:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = AutoConfig.from_pretrained(lowercase_ )
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_,configs=[config] )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : List[Any] )-> Any:
'''simple docstring'''
A__ = 'sshleifer/tinier_bart'
A__ = AutoConfig.from_pretrained(lowercase_ )
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_,configs=[config] )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case__ ( self : List[str] )-> List[str]:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
A__ = AutoConfig.from_pretrained(lowercase_ )
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_,configs=[config] )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case__ ( self : int )-> Union[str, Any]:
'''simple docstring'''
A__ = 'sshleifer/tinier_bart'
A__ = AutoConfig.from_pretrained(lowercase_ )
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_,configs=[config] )
A__ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case__ ( self : Optional[Any] )-> Tuple:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,save_to_csv=lowercase_,sequence_lengths=[8],batch_sizes=[1],inference_time_csv_file=os.path.join(lowercase_,'inf_time.csv' ),train_memory_csv_file=os.path.join(lowercase_,'train_mem.csv' ),inference_memory_csv_file=os.path.join(lowercase_,'inf_mem.csv' ),train_time_csv_file=os.path.join(lowercase_,'train_time.csv' ),env_info_csv_file=os.path.join(lowercase_,'env.csv' ),multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
benchmark.run()
self.assertTrue(Path(os.path.join(lowercase_,'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_,'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_,'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_,'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(lowercase_,'env.csv' ) ).exists() )
def snake_case__ ( self : Tuple )-> str:
'''simple docstring'''
A__ = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(lowercase_ : Optional[Any] ):
self.assertTrue(hasattr(lowercase_,'sequential' ) )
self.assertTrue(hasattr(lowercase_,'cumulative' ) )
self.assertTrue(hasattr(lowercase_,'current' ) )
self.assertTrue(hasattr(lowercase_,'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
A__ = PyTorchBenchmarkArguments(
models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],log_filename=os.path.join(lowercase_,'log.txt' ),log_print=lowercase_,trace_memory_line_by_line=lowercase_,multi_process=lowercase_,)
A__ = PyTorchBenchmark(lowercase_ )
A__ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(lowercase_,'log.txt' ) ).exists() )
| 282
| 1
|
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def A__ ( UpperCAmelCase_ ):
_UpperCamelCase : Dict = []
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
for v in tree.values():
shapes.extend(_fetch_dims(UpperCAmelCase_ ) )
elif isinstance(UpperCAmelCase_ , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(UpperCAmelCase_ ) )
elif isinstance(UpperCAmelCase_ , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('Not supported' )
return shapes
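# Editor's note: the function above recursively collects tensor shapes from a
# nested dict/list/tuple, e.g. (with the obfuscated names read as intended):
#     _fetch_dims({"a": torch.zeros(2, 3), "b": [torch.zeros(4)]})
#     # -> [torch.Size([2, 3]), torch.Size([4])]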
@torch.jit.ignore
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : Optional[Any] = []
for d in reversed(UpperCAmelCase_ ):
idx.append(flat_idx % d )
_UpperCamelCase : Optional[Any] = flat_idx // d
return tuple(reversed(UpperCAmelCase_ ) )
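# Editor's note: this inverts row-major (C-order) flattening. For dims (2, 3)
# and flat index 5: 5 % 3 == 2, then 5 // 3 == 1, giving multi-index (1, 2).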
@torch.jit.ignore
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = None , UpperCAmelCase_ = None , ):
def reduce_edge_list(UpperCAmelCase_ ) -> None:
_UpperCamelCase : Optional[int] = True
for i in range(len(UpperCAmelCase_ ) ):
_UpperCamelCase : Optional[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
_UpperCamelCase : Dict = l[reversed_idx]
if start_edges is None:
_UpperCamelCase : Optional[int] = [s == 0 for s in start]
reduce_edge_list(UpperCAmelCase_ )
if end_edges is None:
_UpperCamelCase : Any = [e == (d - 1) for e, d in zip(UpperCAmelCase_ , UpperCAmelCase_ )]
reduce_edge_list(UpperCAmelCase_ )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(UpperCAmelCase_ ) == 0:
return [()]
elif len(UpperCAmelCase_ ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
_UpperCamelCase : Optional[int] = []
_UpperCamelCase : Any = []
# Dimensions common to start and end can be selected directly
for s, e in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
if s == e:
path_list.append(slice(UpperCAmelCase_ , s + 1 ) )
else:
break
_UpperCamelCase : Optional[int] = tuple(UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = len(UpperCAmelCase_ )
# start == end, and we're done
if divergence_idx == len(UpperCAmelCase_ ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
_UpperCamelCase : str = start[divergence_idx]
return tuple(
path + (slice(UpperCAmelCase_ , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
_UpperCamelCase : Optional[Any] = end[divergence_idx]
return tuple(
path + (slice(UpperCAmelCase_ , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
_UpperCamelCase : Optional[Any] = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
_UpperCamelCase : Optional[int] = t.shape[:no_batch_dims]
_UpperCamelCase : Optional[Any] = list(_flat_idx_to_idx(UpperCAmelCase_ , UpperCAmelCase_ ) )
# _get_minimal_slice_set is inclusive
_UpperCamelCase : Optional[int] = list(_flat_idx_to_idx(flat_end - 1 , UpperCAmelCase_ ) )
# Get an ordered list of slices to perform
_UpperCamelCase : Tuple = _get_minimal_slice_set(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
_UpperCamelCase : Any = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def A__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = False , UpperCAmelCase_ = None , UpperCAmelCase_ = False , ):
if not (len(UpperCAmelCase_ ) > 0):
raise ValueError('Must provide at least one input' )
_UpperCamelCase : Dict = [shape[:no_batch_dims] for shape in _fetch_dims(UpperCAmelCase_ )]
_UpperCamelCase : List[Any] = tuple([max(UpperCAmelCase_ ) for s in zip(*UpperCAmelCase_ )] )
def _prep_inputs(UpperCAmelCase_ ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
_UpperCamelCase : Optional[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
_UpperCamelCase : int = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
_UpperCamelCase : Optional[int] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
_UpperCamelCase : int = tensor_tree_map(_prep_inputs , UpperCAmelCase_ )
_UpperCamelCase : Dict = None
if _out is not None:
_UpperCamelCase : Union[str, Any] = tensor_tree_map(lambda UpperCAmelCase_ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
_UpperCamelCase : Dict = 1
for d in orig_batch_dims:
flat_batch_dim *= d
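    # Editor's note: the next line is ceil(flat_batch_dim / chunk_size) without
    # floats: the boolean adds 1 exactly when there is a ragged final chunk.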
_UpperCamelCase : List[str] = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(UpperCAmelCase_ ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
_UpperCamelCase : List[Any] = 0
_UpperCamelCase : Any = prepped_outputs
for _ in range(UpperCAmelCase_ ):
# Chunk the input
if not low_mem:
_UpperCamelCase : List[Any] = _select_chunk
else:
_UpperCamelCase : Union[str, Any] = partial(
_chunk_slice , flat_start=UpperCAmelCase_ , flat_end=min(UpperCAmelCase_ , i + chunk_size ) , no_batch_dims=len(UpperCAmelCase_ ) , )
_UpperCamelCase : Optional[int] = tensor_tree_map(UpperCAmelCase_ , UpperCAmelCase_ )
# Run the layer on the chunk
_UpperCamelCase : List[str] = layer(**UpperCAmelCase_ )
# Allocate space for the output
if out is None:
_UpperCamelCase : List[Any] = tensor_tree_map(lambda UpperCAmelCase_ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , UpperCAmelCase_ )
# Put the chunk in its pre-allocated space
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
def assign(UpperCAmelCase_ , UpperCAmelCase_ ) -> None:
for k, v in da.items():
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
assign(UpperCAmelCase_ , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
_UpperCamelCase : List[Any] = da[k]
assign(UpperCAmelCase_ , UpperCAmelCase_ )
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
for xa, xa in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
_UpperCamelCase : Optional[Any] = xa
elif isinstance(UpperCAmelCase_ , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
_UpperCamelCase : List[Any] = output_chunk
else:
raise ValueError('Not supported' )
i += chunk_size
_UpperCamelCase : Any = tensor_tree_map(lambda UpperCAmelCase_ : t.view(orig_batch_dims + t.shape[1:] ) , UpperCAmelCase_ )
return out
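# ---------------------------------------------------------------------------
# Editor's sketch (names are assumptions; upstream this function is called
# `chunk_layer`): it flattens the leading batch dims, runs `layer` on
# `chunk_size`-sized slices, and stitches the results back together:
#
#     out = chunk_layer(
#         layer=my_module,                  # callable taking keyword tensors
#         inputs={"x": torch.randn(4, 8, 16)},
#         chunk_size=2,
#         no_batch_dims=2,                  # treat the leading (4, 8) as batch
#     )
#     # `out` has the same nested structure and batch shape as layer(**inputs)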
class lowercase__ :
def __init__( self : Optional[Any] ,lowerCamelCase__ : int = 512 ,):
'''simple docstring'''
_UpperCamelCase : Optional[int] = max_chunk_size
_UpperCamelCase : Any = None
_UpperCamelCase : Optional[Any] = None
def UpperCamelCase_ ( self : int ,lowerCamelCase__ : Callable ,lowerCamelCase__ : tuple ,lowerCamelCase__ : int ):
'''simple docstring'''
logging.info('Tuning chunk size...' )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
_UpperCamelCase : Optional[Any] = [2**l for l in range(int(math.log(self.max_chunk_size ,2 ) ) + 1 )]
_UpperCamelCase : Optional[Any] = [c for c in candidates if c > min_chunk_size]
_UpperCamelCase : Tuple = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(lowerCamelCase__ : int ) -> bool:
try:
with torch.no_grad():
fn(*A_ ,chunk_size=A_ )
return True
except RuntimeError:
return False
_UpperCamelCase : Tuple = 0
_UpperCamelCase : Dict = len(A_ ) - 1
while i > min_viable_chunk_size_index:
_UpperCamelCase : Union[str, Any] = test_chunk_size(candidates[i] )
if not viable:
_UpperCamelCase : Tuple = (min_viable_chunk_size_index + i) // 2
else:
_UpperCamelCase : List[Any] = i
_UpperCamelCase : List[Any] = (i + len(A_ ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def UpperCamelCase_ ( self : List[str] ,lowerCamelCase__ : Iterable ,lowerCamelCase__ : Iterable ):
'''simple docstring'''
_UpperCamelCase : Any = True
for aa, aa in zip(A_ ,A_ ):
assert type(A_ ) == type(A_ )
if isinstance(A_ ,(list, tuple) ):
consistent &= self._compare_arg_caches(A_ ,A_ )
elif isinstance(A_ ,A_ ):
            _UpperCamelCase : Dict = [v for _, v in sorted(aa.items() ,key=lambda x : x[0] )]
            _UpperCamelCase : int = [v for _, v in sorted(aa.items() ,key=lambda x : x[0] )]
consistent &= self._compare_arg_caches(A_ ,A_ )
else:
consistent &= aa == aa
return consistent
def UpperCamelCase_ ( self : int ,lowerCamelCase__ : Callable ,lowerCamelCase__ : tuple ,lowerCamelCase__ : int ,):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : Tuple = tree_map(lambda lowerCamelCase__ : a.shape if isinstance(A_ ,torch.Tensor ) else a ,A_ ,A_ )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(A_ )
_UpperCamelCase : int = self._compare_arg_caches(self.cached_arg_data ,A_ )
else:
# Otherwise, we can reuse the precomputed value
_UpperCamelCase : int = False
if not consistent:
_UpperCamelCase : int = self._determine_favorable_chunk_size(
A_ ,A_ ,A_ ,)
_UpperCamelCase : Optional[int] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
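# Editor's note: the tuner above probes power-of-two chunk sizes up to
# `max_chunk_size` and binary-searches for the largest candidate whose trial
# forward pass does not raise a RuntimeError (typically an OOM), caching the
# result keyed on the argument shapes so retuning happens only when the
# inputs change.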
| 83
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase : str = {
"configuration_nezha": ["NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP", "NezhaConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[str] = [
"NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST",
"NezhaForNextSentencePrediction",
"NezhaForMaskedLM",
"NezhaForPreTraining",
"NezhaForMultipleChoice",
"NezhaForQuestionAnswering",
"NezhaForSequenceClassification",
"NezhaForTokenClassification",
"NezhaModel",
"NezhaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
lowerCamelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 204
| 0
|
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __snake_case ( unittest.TestCase ):
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=30 , snake_case__=2 , snake_case__=3 , snake_case__=True , snake_case__=True , snake_case__=32 , snake_case__=5 , snake_case__=4 , snake_case__=37 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=10 , snake_case__=0.02 , ) -> str:
'''simple docstring'''
UpperCAmelCase : Optional[int] =parent
UpperCAmelCase : Optional[Any] =batch_size
UpperCAmelCase : Any =image_size
UpperCAmelCase : Any =patch_size
UpperCAmelCase : List[Any] =num_channels
UpperCAmelCase : Optional[int] =is_training
UpperCAmelCase : str =use_labels
UpperCAmelCase : str =hidden_size
UpperCAmelCase : Union[str, Any] =num_hidden_layers
UpperCAmelCase : Any =num_attention_heads
UpperCAmelCase : Optional[int] =intermediate_size
UpperCAmelCase : Dict =hidden_act
UpperCAmelCase : Optional[int] =hidden_dropout_prob
UpperCAmelCase : Union[str, Any] =attention_probs_dropout_prob
UpperCAmelCase : Tuple =type_sequence_label_size
UpperCAmelCase : Optional[Any] =initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase : Optional[int] =(image_size // patch_size) ** 2
UpperCAmelCase : Optional[int] =num_patches + 1
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : int =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : int =ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , )
return config, pixel_values
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] =FlaxViTModel(config=snake_case__ )
UpperCAmelCase : Tuple =model(snake_case__ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
UpperCAmelCase : Any =(self.image_size, self.image_size)
UpperCAmelCase : Union[str, Any] =(self.patch_size, self.patch_size)
UpperCAmelCase : str =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def UpperCAmelCase__ ( self , snake_case__ , snake_case__ ) -> List[str]:
'''simple docstring'''
UpperCAmelCase : int =self.type_sequence_label_size
UpperCAmelCase : int =FlaxViTForImageClassification(config=snake_case__ )
UpperCAmelCase : Optional[Any] =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase : int =1
UpperCAmelCase : str =FlaxViTForImageClassification(snake_case__ )
UpperCAmelCase : Optional[int] =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase : List[Any] =model(snake_case__ )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : Optional[Any] =self.prepare_config_and_inputs()
        UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase : Tuple ={'''pixel_values''': pixel_values}
return config, inputs_dict
@require_flax
class __snake_case ( lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : List[str] = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def UpperCAmelCase__ ( self ) -> None:
'''simple docstring'''
UpperCAmelCase : Any =FlaxViTModelTester(self )
UpperCAmelCase : int =ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , expected_arg_names )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
model = model_class(config )
@jax.jit
def model_jitted(pixel_values , **kwargs ):
    return model(pixel_values=pixel_values , **kwargs )
with self.subTest('''JIT Enabled''' ):
jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
self.assertEqual(len(jitted_outputs ) , len(outputs ) )
for jitted_output, output in zip(jitted_outputs , outputs ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained('''google/vit-base-patch16-224''' )
outputs = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(outputs )
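# Worked example of the patch arithmetic used by the tests above (values are
# illustrative, not taken from the tester defaults): with a 224x224 image and
# 16x16 patches, the encoder sees (224 // 16) ** 2 = 196 patches plus one
# [CLS] token, i.e. a sequence length of 197.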
| 78
|
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __snake_case ( lowerCamelCase__ ):
@require_torch
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
load = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
run = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
mock = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
mname = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(mname )
BertModel.from_pretrained(mname )
BertTokenizer.from_pretrained(mname )
pipeline(task='''fill-mask''' , model=mname )
# baseline - just load from_pretrained with normal network
cmd = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
env = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
env['''TRANSFORMERS_OFFLINE'''] = '''1'''
result = subprocess.run(cmd , env=env , check=True , capture_output=True )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
load = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
run = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
mock = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
mname = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(mname )
BertModel.from_pretrained(mname )
BertTokenizer.from_pretrained(mname )
pipeline(task='''fill-mask''' , model=mname )
# baseline - just load from_pretrained with normal network
cmd = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
env = self.get_env()
result = subprocess.run(cmd , env=env , check=True , capture_output=True )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
load = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
run = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
mock = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
cmd = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
env = self.get_env()
result = subprocess.run(cmd , env=env , check=True , capture_output=True )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
cmd = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
env['''TRANSFORMERS_OFFLINE'''] = '''1'''
result = subprocess.run(cmd , env=env , check=True , capture_output=True )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
load = '''
from transformers import pipeline
'''
run = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
mock = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
env = self.get_env()
env['''TRANSFORMERS_OFFLINE'''] = '''1'''
cmd = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
result = subprocess.run(cmd , env=env , check=False , capture_output=True )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
load = '''
from transformers import AutoModel
'''
run = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
cmd = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
env = self.get_env()
result = subprocess.run(cmd , env=env , check=True , capture_output=True )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
env['''TRANSFORMERS_OFFLINE'''] = '''1'''
result = subprocess.run(cmd , env=env , check=True , capture_output=True )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
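# A minimal standalone sketch (no transformers required) of the monkey-patching
# technique these tests inject into the subprocess: replacing socket.socket makes
# any attempted network connection raise immediately, emulating a machine with no
# connectivity.
#
#   import socket
#   def offline_socket(*args, **kwargs):
#       raise RuntimeError("network access blocked for this test")
#   socket.socket = offline_socket  # every connection attempt from here on raises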
| 78
| 1
|
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
UpperCAmelCase : Union[str, Any] = "sshleifer/student_marian_en_ro_6_1"
UpperCAmelCase : Union[str, Any] = "sshleifer/tiny-mbart"
@require_torch
class __lowercase ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
def run_seqaseq_quick( self , distributed=False , extra_args_str=None , predict_with_generate=True , do_train=True , do_eval=True , do_predict=True , ) -> Tuple:
'''simple docstring'''
output_dir = self.run_trainer(
    eval_steps=1 , max_len=12 , model_name=MARIAN_MODEL , num_train_epochs=1 , distributed=distributed , extra_args_str=extra_args_str , predict_with_generate=predict_with_generate , do_train=do_train , do_eval=do_eval , do_predict=do_predict , )
logs = TrainerState.load_from_json(os.path.join(output_dir , """trainer_state.json""" ) ).log_history
if not do_eval:
return
eval_metrics = [log for log in logs if """eval_loss""" in log.keys()]
first_step_stats = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
last_step_stats = eval_metrics[-1]
assert isinstance(last_step_stats["""eval_bleu"""] , float )
assert not math.isnan(float(last_step_stats["""eval_loss"""] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __A ( self ) -> Optional[int]:
'''simple docstring'''
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __A ( self ) -> List[Any]:
'''simple docstring'''
self.run_seqaseq_quick(distributed=False )
@require_torch_multi_gpu
def __A ( self ) -> Tuple:
'''simple docstring'''
self.run_seqaseq_quick(distributed=True )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def __A ( self ) -> Any:
'''simple docstring'''
self.run_seqaseq_quick(distributed=__UpperCAmelCase , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def __A ( self ) -> Dict:
'''simple docstring'''
self.run_seqaseq_quick(distributed=__UpperCAmelCase , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def __A ( self ) -> Dict:
'''simple docstring'''
self.run_seqaseq_quick(distributed=__UpperCAmelCase , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=__UpperCAmelCase )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def __A ( self ) -> List[Any]:
'''simple docstring'''
self.run_seqaseq_quick(
    distributed=True , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=False )
@require_apex
@require_torch_gpu
def __A ( self ) -> Tuple:
'''simple docstring'''
self.run_seqaseq_quick(distributed=__UpperCAmelCase , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__UpperCAmelCase , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
def __A ( self , experiment_id ) -> Any:
'''simple docstring'''
experiments = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
data = experiments[experiment_id]
kwargs = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
log_info_string = """Running training"""
with CaptureStderr() as cl:
    self.run_seqaseq_quick(**kwargs , extra_args_str=data["""extra_args_str"""] )
n_matches = len(re.findall(log_info_string , cl.err ) )
self.assertEqual(n_matches , data["""n_matches"""] )
@slow
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
output_dir = self.run_trainer(
    eval_steps=2 , max_len=1_28 , model_name=MARIAN_MODEL , learning_rate=3e-4 , num_train_epochs=10 , distributed=False , )
# Check metrics
logs = TrainerState.load_from_json(os.path.join(output_dir , """trainer_state.json""" ) ).log_history
eval_metrics = [log for log in logs if """eval_loss""" in log.keys()]
first_step_stats = eval_metrics[0]
last_step_stats = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["""eval_bleu"""] , __UpperCAmelCase )
# test if do_predict saves generations and metrics
contents = os.listdir(output_dir )
contents = {os.path.basename(p ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
from transformers.training_args import OptimizerNames
def train_and_return_metrics(optim ) -> Tuple[int, float]:
    extra_args = """--skip_memory_metrics 0"""
    output_dir = self.run_trainer(
        max_len=1_28 , model_name=MARIAN_MODEL , learning_rate=3e-4 , num_train_epochs=1 , optim=optim , distributed=True , extra_args_str=extra_args , do_eval=False , do_predict=False , n_gpus_to_use=1 , )
    # Check metrics
    logs = TrainerState.load_from_json(Path(output_dir , """trainer_state.json""" ) ).log_history
    gpu_peak_mem_mb = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**20 )
    gpu_alloc_mem_mb = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**20 )
    loss = logs[0]["""train_loss"""]
    return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
gpu_peak_mem_orig , gpu_alloc_mem_orig , loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
gpu_peak_mem_bnb , gpu_alloc_mem_bnb , loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which sit in `nn.Embedding`,
# which doesn't get quantized and remains in fp32. Therefore only 25M parameters are
# quantized to 2 bytes, and the diff in optimizer memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB of total memory saved.
#
# Peak memory should be the same - the totals should differ by about that same margin.
#
# After leaving a small margin to accommodate differences between GPUs, let's check
# that we have at least 120MB in savings.
expected_savings = 1_20
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
    gpu_alloc_mem_diff , expected_savings , """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
    F' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'
    F' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB' , )
self.assertGreater(
    gpu_total_mem_diff , expected_savings , """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
    F' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'
    F' gpu_total_mem_bnb={gpu_total_mem_bnb}MB' , )
self.assertEqual(
    loss_orig , loss_bnb , F'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}' )
def run_trainer( self , max_len , model_name , num_train_epochs , learning_rate = 3e-3 , optim = "adafactor" , distributed = False , extra_args_str = None , eval_steps = 0 , predict_with_generate = True , do_train = True , do_eval = True , do_predict = True , n_gpus_to_use = None , ) -> List[str]:
'''simple docstring'''
data_dir = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
output_dir = self.get_auto_remove_tmp_dir()
args_train = F'\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(num_train_epochs )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(eval_steps )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n '.split()
args_eval = F'\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(eval_steps )}\n '.split()
args_predict = """
--do_predict
""".split()
args = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F'--optim {optim}'.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
    n_gpus_to_use = get_gpu_count()
master_port = get_torch_dist_unique_port()
distributed_args = F'\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n '.split()
cmd = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(cmd , env=self.get_env() )
else:
lowerCamelCase = ["""run_translation.py"""] + args
with patch.object(__UpperCAmelCase , """argv""" , __UpperCAmelCase ):
main()
return output_dir
| 252
|
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
('''time_embed.0.weight''', '''time_embedding.linear_1.weight'''),
('''time_embed.0.bias''', '''time_embedding.linear_1.bias'''),
('''time_embed.2.weight''', '''time_embedding.linear_2.weight'''),
('''time_embed.2.bias''', '''time_embedding.linear_2.bias'''),
('''input_blocks.0.0.weight''', '''conv_in.weight'''),
('''input_blocks.0.0.bias''', '''conv_in.bias'''),
('''out.0.weight''', '''conv_norm_out.weight'''),
('''out.0.bias''', '''conv_norm_out.bias'''),
('''out.2.weight''', '''conv_out.weight'''),
('''out.2.bias''', '''conv_out.bias'''),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
('''in_layers.0''', '''norm1'''),
('''in_layers.2''', '''conv1'''),
('''out_layers.0''', '''norm2'''),
('''out_layers.3''', '''conv2'''),
('''emb_layers.1''', '''time_emb_proj'''),
('''skip_connection''', '''conv_shortcut'''),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = F"""down_blocks.{i}.resnets.{j}."""
        sd_down_res_prefix = F"""input_blocks.{3*i + j + 1}.0."""
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = F"""down_blocks.{i}.attentions.{j}."""
            sd_down_atn_prefix = F"""input_blocks.{3*i + j + 1}.1."""
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = F"""up_blocks.{i}.resnets.{j}."""
        sd_up_res_prefix = F"""output_blocks.{3*i + j}.0."""
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = F"""up_blocks.{i}.attentions.{j}."""
            sd_up_atn_prefix = F"""output_blocks.{3*i + j}.1."""
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = F"""down_blocks.{i}.downsamplers.0.conv."""
        sd_downsample_prefix = F"""input_blocks.{3*(i+1)}.0.op."""
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
        # no upsample in up_blocks.3
        hf_upsample_prefix = F"""up_blocks.{i}.upsamplers.0."""
        sd_upsample_prefix = F"""output_blocks.{3*i + 2}.{1 if i == 0 else 2}."""
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
hf_mid_atn_prefix = '''mid_block.attentions.0.'''
sd_mid_atn_prefix = '''middle_block.1.'''
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
    hf_mid_res_prefix = F"""mid_block.resnets.{j}."""
    sd_mid_res_prefix = F"""middle_block.{2*j}."""
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
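# Sanity note on the index arithmetic above: SD's flat `input_blocks` list holds, per
# resolution level i, two resnet(+attention) entries followed by one downsampler,
# hence `3*i + j + 1` (the `+ 1` skips the initial conv at input_blocks.0) and
# `3*(i+1)` for the downsampler slot.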
def convert_unet_state_dict( unet_state_dict ):
    """simple docstring"""
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part , sd_part )
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part , sd_part )
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
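# Illustrative usage (a one-entry toy dict; real UNet state dicts hold hundreds of
# tensors): convert_unet_state_dict({"conv_in.weight": w}) returns
# {"input_blocks.0.0.weight": w}, i.e. HF Diffusers key names rewritten to the
# original stable-diffusion layout.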
# ================#
# VAE Conversion #
# ================#
_lowerCAmelCase = [
# (stable-diffusion, HF Diffusers)
('''nin_shortcut''', '''conv_shortcut'''),
('''norm_out''', '''conv_norm_out'''),
('''mid.attn_1.''', '''mid_block.attentions.0.'''),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
_lowerCAmelCase = F"""encoder.down_blocks.{i}.resnets.{j}."""
_lowerCAmelCase = F"""encoder.down.{i}.block.{j}."""
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
_lowerCAmelCase = F"""down_blocks.{i}.downsamplers.0."""
_lowerCAmelCase = F"""down.{i}.downsample."""
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
_lowerCAmelCase = F"""up_blocks.{i}.upsamplers.0."""
_lowerCAmelCase = F"""up.{3-i}.upsample."""
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
_lowerCAmelCase = F"""decoder.up_blocks.{i}.resnets.{j}."""
_lowerCAmelCase = F"""decoder.up.{3-i}.block.{j}."""
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
_lowerCAmelCase = F"""mid_block.resnets.{i}."""
_lowerCAmelCase = F"""mid.block_{i+1}."""
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
_lowerCAmelCase = [
# (stable-diffusion, HF Diffusers)
('''norm.''', '''group_norm.'''),
('''q.''', '''query.'''),
('''k.''', '''key.'''),
('''v.''', '''value.'''),
('''proj_out.''', '''proj_attn.'''),
]
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
return w.reshape(*w.shape , 1 , 1 )
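# The helper above exists because the original SD VAE implements its attention
# projections as 1x1 convolutions: a (C_out, C_in) linear weight from Diffusers must
# become a (C_out, C_in, 1, 1) conv weight, which is exactly w.reshape(*w.shape, 1, 1).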
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
lowerCAmelCase__ : str = v.replace(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
lowerCAmelCase__ : Dict = v.replace(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : List[Any] = v
lowerCAmelCase__ : Union[str, Any] = {v: vae_state_dict[k] for k, v in mapping.items()}
lowerCAmelCase__ : Tuple = ["""q""", """k""", """v""", """proj_out"""]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f"""mid.attn_1.{weight_name}.weight""" in k:
print(f"""Reshaping {k} for SD format""" )
lowerCAmelCase__ : Optional[int] = reshape_weight_for_sd(UpperCamelCase )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
_lowerCAmelCase = [
# (stable-diffusion, HF Diffusers)
('''resblocks.''', '''text_model.encoder.layers.'''),
('''ln_1''', '''layer_norm1'''),
('''ln_2''', '''layer_norm2'''),
('''.c_fc.''', '''.fc1.'''),
('''.c_proj.''', '''.fc2.'''),
('''.attn''', '''.self_attn'''),
('''ln_final.''', '''transformer.text_model.final_layer_norm.'''),
('''token_embedding.weight''', '''transformer.text_model.embeddings.token_embedding.weight'''),
('''positional_embedding''', '''transformer.text_model.embeddings.position_embedding.weight'''),
]
_lowerCAmelCase = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
_lowerCAmelCase = re.compile('''|'''.join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
_lowerCAmelCase = {'''q''': 0, '''k''': 1, '''v''': 2}
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = {}
lowerCAmelCase__ : int = {}
lowerCAmelCase__ : List[Any] = {}
for k, v in text_enc_dict.items():
if (
k.endswith(""".self_attn.q_proj.weight""" )
or k.endswith(""".self_attn.k_proj.weight""" )
or k.endswith(""".self_attn.v_proj.weight""" )
):
lowerCAmelCase__ : Optional[int] = k[: -len(""".q_proj.weight""" )]
lowerCAmelCase__ : Tuple = k[-len("""q_proj.weight""" )]
if k_pre not in capture_qkv_weight:
lowerCAmelCase__ : List[Any] = [None, None, None]
lowerCAmelCase__ : Dict = v
continue
if (
k.endswith(""".self_attn.q_proj.bias""" )
or k.endswith(""".self_attn.k_proj.bias""" )
or k.endswith(""".self_attn.v_proj.bias""" )
):
lowerCAmelCase__ : str = k[: -len(""".q_proj.bias""" )]
lowerCAmelCase__ : List[str] = k[-len("""q_proj.bias""" )]
if k_pre not in capture_qkv_bias:
lowerCAmelCase__ : Union[str, Any] = [None, None, None]
lowerCAmelCase__ : Any = v
continue
lowerCAmelCase__ : Dict = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" )
lowerCAmelCase__ : Any = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , UpperCamelCase )
lowerCAmelCase__ : Tuple = torch.cat(UpperCamelCase )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("""CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing""" )
lowerCAmelCase__ : str = textenc_pattern.sub(lambda m : protected[re.escape(m.group(0 ) )] , UpperCamelCase )
lowerCAmelCase__ : List[Any] = torch.cat(UpperCamelCase )
return new_state_dict
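# Shape note for the concatenations above: HF CLIP keeps separate q/k/v projections,
# each of shape (d, d) (bias (d,)), while the original OpenCLIP checkpoint stores one
# fused in_proj of shape (3d, d); torch.cat stacks the captured tensors along dim 0
# in the fixed q, k, v order (see the {'q': 0, 'k': 1, 'v': 2} mapping above).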
def _SCREAMING_SNAKE_CASE ( UpperCamelCase ):
"""simple docstring"""
return text_enc_dict
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--model_path''', default=None, type=str, required=True, help='''Path to the model to convert.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--half''', action='''store_true''', help='''Save weights in half precision.''')
parser.add_argument(
    '''--use_safetensors''', action='''store_true''', help='''Save weights using safetensors; default is ckpt.'''
)
_lowerCAmelCase = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
_lowerCAmelCase = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.safetensors''')
_lowerCAmelCase = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.safetensors''')
_lowerCAmelCase = osp.join(args.model_path, '''text_encoder''', '''model.safetensors''')
# Load models from safetensors if they exist, otherwise from the PyTorch checkpoints
if osp.exists(unet_path):
_lowerCAmelCase = load_file(unet_path, device='''cpu''')
else:
_lowerCAmelCase = osp.join(args.model_path, '''unet''', '''diffusion_pytorch_model.bin''')
_lowerCAmelCase = torch.load(unet_path, map_location='''cpu''')
if osp.exists(vae_path):
_lowerCAmelCase = load_file(vae_path, device='''cpu''')
else:
_lowerCAmelCase = osp.join(args.model_path, '''vae''', '''diffusion_pytorch_model.bin''')
_lowerCAmelCase = torch.load(vae_path, map_location='''cpu''')
if osp.exists(text_enc_path):
_lowerCAmelCase = load_file(text_enc_path, device='''cpu''')
else:
_lowerCAmelCase = osp.join(args.model_path, '''text_encoder''', '''pytorch_model.bin''')
_lowerCAmelCase = torch.load(text_enc_path, map_location='''cpu''')
# Convert the UNet model
_lowerCAmelCase = convert_unet_state_dict(unet_state_dict)
_lowerCAmelCase = {'''model.diffusion_model.''' + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
_lowerCAmelCase = convert_vae_state_dict(vae_state_dict)
_lowerCAmelCase = {'''first_stage_model.''' + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
_lowerCAmelCase = '''text_model.encoder.layers.22.layer_norm2.bias''' in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
_lowerCAmelCase = {'''transformer.''' + k: v for k, v in text_enc_dict.items()}
_lowerCAmelCase = convert_text_enc_state_dict_vaa(text_enc_dict)
_lowerCAmelCase = {'''cond_stage_model.model.''' + k: v for k, v in text_enc_dict.items()}
else:
_lowerCAmelCase = convert_text_enc_state_dict(text_enc_dict)
_lowerCAmelCase = {'''cond_stage_model.transformer.''' + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
_lowerCAmelCase = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
_lowerCAmelCase = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
_lowerCAmelCase = {'''state_dict''': state_dict}
torch.save(state_dict, args.checkpoint_path)
| 37
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_mobilenet_v2""": [
"""MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""MobileNetV2Config""",
"""MobileNetV2OnnxConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case = ["""MobileNetV2FeatureExtractor"""]
snake_case = ["""MobileNetV2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_mobilenet_v2"""] = [
    """MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST""",
    """MobileNetV2ForImageClassification""",
    """MobileNetV2ForSemanticSegmentation""",
    """MobileNetV2Model""",
    """MobileNetV2PreTrainedModel""",
    """load_tf_weights_in_mobilenet_v2""",
]
if TYPE_CHECKING:
from .configuration_mobilenet_v2 import (
    MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    MobileNetV2Config,
    MobileNetV2OnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilenet_v2 import MobileNetV2FeatureExtractor
from .image_processing_mobilenet_v2 import MobileNetV2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilenet_v2 import (
    MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST,
    MobileNetV2ForImageClassification,
    MobileNetV2ForSemanticSegmentation,
    MobileNetV2Model,
    MobileNetV2PreTrainedModel,
    load_tf_weights_in_mobilenet_v2,
)
else:
import sys
snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
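# A toy illustration (not the real _LazyModule implementation) of the lazy-import
# pattern used above: nothing heavy is imported until an attribute is first accessed.
#
#   import importlib, types
#
#   class LazyDemo(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._attr_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}
#       def __getattr__(self, attr):
#           module = importlib.import_module(self._attr_to_module[attr])
#           return getattr(module, attr)
#
#   lazy = LazyDemo("demo", {"json": ["dumps"]})
#   lazy.dumps({"a": 1})  # the json module is imported only at this call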
| 319
|
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
snake_case = 16
snake_case = 32
def get_dataloaders( accelerator , batch_size = 16 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained("bert-base-cased" )
SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("glue" , "mrpc" )
def tokenize_function(lowercase ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=lowercase , max_length=lowercase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE : List[Any] = datasets.map(
lowercase , batched=lowercase , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE : Tuple = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(lowercase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE : str = 16
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE : Optional[Any] = 8
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = None
return tokenizer.pad(
lowercase , padding="longest" , max_length=lowercase , pad_to_multiple_of=lowercase , return_tensors="pt" , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE : Optional[int] = DataLoader(
tokenized_datasets["train"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
SCREAMING_SNAKE_CASE : Dict = DataLoader(
tokenized_datasets["validation"] , shuffle=lowercase , collate_fn=lowercase , batch_size=lowercase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function( config , args ):
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , lowercase ) == "1":
SCREAMING_SNAKE_CASE : int = 2
# New Code #
SCREAMING_SNAKE_CASE : Union[str, Any] = int(args.gradient_accumulation_steps )
# Initialize accelerator
SCREAMING_SNAKE_CASE : Tuple = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=lowercase )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
"Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE : Any = config["lr"]
SCREAMING_SNAKE_CASE : Optional[Any] = int(config["num_epochs"] )
SCREAMING_SNAKE_CASE : List[Any] = int(config["seed"] )
SCREAMING_SNAKE_CASE : Union[str, Any] = int(config["batch_size"] )
SCREAMING_SNAKE_CASE : Optional[Any] = evaluate.load("glue" , "mrpc" )
set_seed(lowercase )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = get_dataloaders(lowercase , lowercase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE : List[Any] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=lowercase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE : Any = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE : Any = AdamW(params=model.parameters() , lr=lowercase )
# Instantiate scheduler
SCREAMING_SNAKE_CASE : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=lowercase , num_warmup_steps=100 , num_training_steps=(len(lowercase ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase , lowercase )
# Now we train the model
for epoch in range(lowercase ):
model.train()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(lowercase ):
SCREAMING_SNAKE_CASE : Any = model(**lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = output.loss
accelerator.backward(lowercase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
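# Note (behavioural sketch, per Accelerate's documented semantics): inside
# accelerator.accumulate(), backward() scales the loss by
# 1/gradient_accumulation_steps, and optimizer.step()/zero_grad() only take
# effect on the last micro-batch of each accumulation window, so this loop
# body is identical to the non-accumulating version.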
model.eval()
for step, batch in enumerate(lowercase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE : List[Any] = model(**lowercase )
SCREAMING_SNAKE_CASE : Optional[Any] = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=lowercase , references=lowercase , )
SCREAMING_SNAKE_CASE : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , lowercase )
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=lowercase , default=lowercase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" , type=lowercase , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(lowercase , lowercase )
if __name__ == "__main__":
main()
| 319
| 1
|
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__A = "hf-internal-testing/tiny-random-bert"
__A = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
__A = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class lowerCamelCase__ ( unittest.TestCase ):
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Union[str, Any] = cached_file(_A , _A )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(_A ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(_A , _A ) ) )
with open(os.path.join(_A , "refs" , "main" ) ) as f:
snake_case : Union[str, Any] = f.read()
self.assertEqual(_A , os.path.join(_A , "snapshots" , _A , _A ) )
self.assertTrue(os.path.isfile(_A ) )
# File is cached at the same place the second time.
snake_case : Union[str, Any] = cached_file(_A , _A )
self.assertEqual(_A , _A )
# Using a specific revision to test the full commit hash.
snake_case : List[str] = cached_file(_A , _A , revision="9b8c223" )
self.assertEqual(_A , os.path.join(_A , "snapshots" , _A , _A ) )
def lowerCamelCase_ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(_A , "is not a valid model identifier" ):
snake_case : Optional[Any] = cached_file("tiny-random-bert" , _A )
with self.assertRaisesRegex(_A , "is not a valid git identifier" ):
snake_case : Optional[Any] = cached_file(_A , _A , revision="aaaa" )
with self.assertRaisesRegex(_A , "does not appear to have a file named" ):
snake_case : List[str] = cached_file(_A , "conf" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
with self.assertRaisesRegex(_A , "does not appear to have a file named" ):
snake_case : Optional[int] = cached_file(_A , "conf" )
with open(os.path.join(_A , "refs" , "main" ) ) as f:
snake_case : str = f.read()
self.assertTrue(os.path.isfile(os.path.join(_A , ".no_exist" , _A , "conf" ) ) )
snake_case : int = cached_file(_A , "conf" , _raise_exceptions_for_missing_entries=_A )
self.assertIsNone(_A )
snake_case : int = cached_file(_A , "conf" , local_files_only=_A , _raise_exceptions_for_missing_entries=_A )
self.assertIsNone(_A )
snake_case : Any = mock.Mock()
snake_case : str = 500
snake_case : Tuple = {}
snake_case : List[Any] = HTTPError
snake_case : Dict = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=_A ) as mock_head:
snake_case : Any = cached_file(_A , "conf" , _raise_exceptions_for_connection_errors=_A )
self.assertIsNone(_A )
# This checks that we did call the fake head request
mock_head.assert_called()
def lowerCamelCase_ ( self ):
"""simple docstring"""
self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , _A ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , _A ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , _A ) )
def lowerCamelCase_ ( self ):
"""simple docstring"""
self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(_A , "is not a valid model identifier" ):
get_file_from_repo("bert-base-case" , _A )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(_A , "is not a valid git identifier" ):
get_file_from_repo("bert-base-cased" , _A , revision="ahaha" )
snake_case : Tuple = get_file_from_repo("bert-base-cased" , _A )
# The name is the cached name which is not very easy to test, so instead we load the content.
snake_case : List[Any] = json.loads(open(_A , "r" ).read() )
self.assertEqual(config["hidden_size"] , 768 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case : Dict = Path(_A ) / "a.txt"
filename.touch()
self.assertEqual(get_file_from_repo(_A , "a.txt" ) , str(_A ) )
self.assertIsNone(get_file_from_repo(_A , "b.txt" ) )
| 148
|
import math
import random
def sigmoid_function( value , deriv = False ):
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value ))
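# Note: with deriv=True the function expects an already-activated value, since for the
# logistic function s(x) the derivative is s(x) * (1 - s(x)).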
# Initial Value
INITIAL_VALUE = 0.02
def forward_propagation( expected , number_propagations ):
    weight = float(2 * (random.randint(1, 1_00 )) - 1 )
    for _ in range(number_propagations ):
        # Forward propagation
        layer_a = sigmoid_function(INITIAL_VALUE * weight )
        # How much did we miss?
        layer_1_error = (expected / 1_00) - layer_a
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_a , deriv=True )
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_a * 1_00
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = int(input("Expected value: "))
__UpperCAmelCase = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
| 299
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_a : Any = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = ["pixel_values"]
def __init__( self , a__ = True , a__ = None , a__ = PILImageResampling.BILINEAR , a__ = True , a__ = None , a__ = True , a__ = 1 / 255 , a__ = True , a__ = None , a__ = None , **a__ , ):
super().__init__(**a__ )
_lowerCAmelCase : Any = size if size is not None else {"""shortest_edge""": 256}
_lowerCAmelCase : List[str] = get_size_dict(a__ , default_to_square=a__ )
_lowerCAmelCase : Optional[int] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
_lowerCAmelCase : Tuple = get_size_dict(a__ , param_name="""crop_size""" )
_lowerCAmelCase : List[str] = do_resize
_lowerCAmelCase : int = size
_lowerCAmelCase : Any = resample
_lowerCAmelCase : List[Any] = do_center_crop
_lowerCAmelCase : int = crop_size
_lowerCAmelCase : Optional[int] = do_rescale
_lowerCAmelCase : List[Any] = rescale_factor
_lowerCAmelCase : List[Any] = do_normalize
_lowerCAmelCase : Optional[int] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCAmelCase : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self , a__ , a__ , a__ = PILImageResampling.BICUBIC , a__ = None , **a__ , ):
_lowerCAmelCase : Optional[int] = get_size_dict(a__ , default_to_square=a__ )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
_lowerCAmelCase : List[str] = get_resize_output_image_size(a__ , size=size["""shortest_edge"""] , default_to_square=a__ )
return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ )
def __A ( self , a__ , a__ , a__ = None , **a__ , ):
_lowerCAmelCase : Tuple = get_size_dict(a__ )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}" )
return center_crop(a__ , size=(size["""height"""], size["""width"""]) , data_format=a__ , **a__ )
def __A ( self , a__ , a__ , a__ = None , **a__ ):
return rescale(a__ , scale=a__ , data_format=a__ , **a__ )
def __A ( self , a__ , a__ , a__ , a__ = None , **a__ , ):
return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ )
def __A ( self , a__ , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = None , a__ = ChannelDimension.FIRST , **a__ , ):
_lowerCAmelCase : Any = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : Optional[Any] = size if size is not None else self.size
_lowerCAmelCase : Optional[int] = get_size_dict(a__ , default_to_square=a__ )
_lowerCAmelCase : Dict = resample if resample is not None else self.resample
_lowerCAmelCase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase : Tuple = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase : List[Any] = get_size_dict(a__ , param_name="""crop_size""" )
_lowerCAmelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase : Union[str, Any] = image_std if image_std is not None else self.image_std
_lowerCAmelCase : Optional[int] = make_list_of_images(a__ )
if not valid_images(a__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_lowerCAmelCase : List[str] = [to_numpy_array(a__ ) for image in images]
if do_resize:
_lowerCAmelCase : Dict = [self.resize(image=a__ , size=a__ , resample=a__ ) for image in images]
if do_center_crop:
_lowerCAmelCase : Optional[Any] = [self.center_crop(image=a__ , size=a__ ) for image in images]
if do_rescale:
_lowerCAmelCase : str = [self.rescale(image=a__ , scale=a__ ) for image in images]
if do_normalize:
_lowerCAmelCase : int = [self.normalize(image=a__ , mean=a__ , std=a__ ) for image in images]
_lowerCAmelCase : Dict = [to_channel_dimension_format(a__ , a__ ) for image in images]
_lowerCAmelCase : Union[str, Any] = {"""pixel_values""": images}
return BatchFeature(data=a__ , tensor_type=a__ )
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Optional[Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(a__ ) != len(a__ ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(a__ ):
_lowerCAmelCase : str = target_sizes.numpy()
_lowerCAmelCase : str = []
for idx in range(len(a__ ) ):
_lowerCAmelCase : Any = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=a__ )
_lowerCAmelCase : Union[str, Any] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(a__ )
else:
_lowerCAmelCase : Optional[Any] = logits.argmax(dim=1 )
_lowerCAmelCase : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
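# Hedged usage sketch for the image processor defined above (dummy input data):
#   import numpy as np
#   processor = __A()  # defaults: resize shortest edge to 256, center crop to 224x224
#   batch = processor(np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8))
#   batch["pixel_values"][0].shape  # -> (3, 224, 224) after resize/crop/rescale/normalize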
| 126
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
NllbTokenizer = None
_a : str = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/nllb-large-en-ro': 1_024,
'facebook/nllb-200-distilled-600M': 1_024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
_UpperCamelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Dict = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : str = ["input_ids", "attention_mask"]
_UpperCamelCase : Tuple = NllbTokenizer
_UpperCamelCase : List[int] = []
_UpperCamelCase : List[int] = []
def __init__( self , a__=None , a__=None , a__="<s>" , a__="</s>" , a__="</s>" , a__="<s>" , a__="<unk>" , a__="<pad>" , a__="<mask>" , a__=None , a__=None , a__=None , a__=False , **a__ , ):
        # Mask token behaves like a normal word, i.e. includes the space before it
_lowerCAmelCase : Optional[int] = AddedToken(a__ , lstrip=a__ , rstrip=a__ ) if isinstance(a__ , a__ ) else mask_token
_lowerCAmelCase : int = legacy_behaviour
super().__init__(
vocab_file=a__ , tokenizer_file=a__ , bos_token=a__ , eos_token=a__ , sep_token=a__ , cls_token=a__ , unk_token=a__ , pad_token=a__ , mask_token=a__ , src_lang=a__ , tgt_lang=a__ , additional_special_tokens=a__ , legacy_behaviour=a__ , **a__ , )
_lowerCAmelCase : List[Any] = vocab_file
_lowerCAmelCase : List[str] = False if not self.vocab_file else True
_lowerCAmelCase : List[str] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
_lowerCAmelCase : Tuple = {
lang_code: self.convert_tokens_to_ids(a__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
_lowerCAmelCase : List[str] = src_lang if src_lang is not None else """eng_Latn"""
_lowerCAmelCase : int = self.convert_tokens_to_ids(self._src_lang )
_lowerCAmelCase : str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __A ( self ):
return self._src_lang
@src_lang.setter
def __A ( self , a__ ):
_lowerCAmelCase : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __A ( self , a__ , a__ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __A ( self , a__ , a__ = None ):
_lowerCAmelCase : Tuple = [self.sep_token_id]
_lowerCAmelCase : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __A ( self , a__ , a__ , a__ , a__ , **a__ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowerCAmelCase : str = src_lang
_lowerCAmelCase : List[Any] = self(a__ , add_special_tokens=a__ , return_tensors=a__ , **a__ )
_lowerCAmelCase : Dict = self.convert_tokens_to_ids(a__ )
_lowerCAmelCase : Dict = tgt_lang_id
return inputs
def __A ( self , a__ , a__ = "eng_Latn" , a__ = None , a__ = "fra_Latn" , **a__ , ):
_lowerCAmelCase : List[Any] = src_lang
_lowerCAmelCase : Optional[int] = tgt_lang
return super().prepare_seqaseq_batch(a__ , a__ , **a__ )
def __A ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __A ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __A ( self , a__ ):
_lowerCAmelCase : Optional[Any] = self.convert_tokens_to_ids(a__ )
if self.legacy_behaviour:
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : str = [self.cur_lang_code]
_lowerCAmelCase : List[Any] = [self.eos_token_id]
_lowerCAmelCase : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : Any = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : Optional[Any] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
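    # Illustrative note (derived from the logic above, not part of the original
    # file): with legacy_behaviour=False and src_lang "eng_Latn", single inputs
    # are built as "eng_Latn <tokens> </s>"; with legacy_behaviour=True they
    # become "<tokens> </s> eng_Latn".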
def __A ( self , a__ ):
_lowerCAmelCase : Any = self.convert_tokens_to_ids(a__ )
if self.legacy_behaviour:
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : str = [self.eos_token_id, self.cur_lang_code]
else:
_lowerCAmelCase : int = [self.cur_lang_code]
_lowerCAmelCase : Optional[int] = [self.eos_token_id]
_lowerCAmelCase : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
_lowerCAmelCase : int = self.convert_ids_to_tokens(self.suffix_tokens )
_lowerCAmelCase : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __A ( self , a__ , a__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(a__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory." )
return
_lowerCAmelCase : Optional[Any] = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ):
copyfile(self.vocab_file , a__ )
return (out_vocab_file,)
| 126
| 1
|
def match_pattern( input_string : str , pattern : str ) -> bool:
    len_string = len(input_string ) + 1
    len_pattern = len(pattern ) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether the prefix of length i
    # of input_string matches the prefix of length j of the given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern )] for j in range(len_string )]
    # since a string of zero length matches a pattern of zero length
    dp[0][0] = 1
    # since a pattern of zero length will never match a string of non-zero length
    for i in range(1 , len_string ):
        dp[i][0] = 0
    # a zero-length string can still match a pattern in which every character
    # is followed by '*', since each "x*" may match the empty string
    for j in range(1 , len_pattern ):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '*' else 0
    # now use a bottom-up approach to fill the table for all remaining lengths
    for i in range(1 , len_string ):
        for j in range(1 , len_pattern ):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1] )
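# Worked example (for illustration): for input_string = "aab" and
# pattern = "c*a*b", "c*" matches the empty string, "a*" matches "aa" and
# "b" matches "b", so match_pattern returns True; for pattern "c*a" it
# would return False.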
if __name__ == "__main__":
import doctest
doctest.testmod()
    # inputting the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
    input_string = '''aab'''
    pattern = '''c*a*b'''
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'{input_string} matches the given pattern {pattern}')
else:
print(F'{input_string} does not match with the given pattern {pattern}')
| 282
|
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A ( self : int ):
'''simple docstring'''
_snake_case = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
_snake_case = AutoTokenizer.from_pretrained('xlm-roberta-base' )
_snake_case = 'The dog is cute and lives in the garden house'
_snake_case = jnp.array([tokenizer.encode(lowercase )] )
_snake_case = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
_snake_case = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
_snake_case = model(lowercase )['last_hidden_state']
self.assertEqual(output.shape , lowercase )
        # compare the actual values for a slice of the last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , lowercase , atol=1E-3 ) )
| 282
| 1
|
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
A_ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class lowercase( __a ):
'''simple docstring'''
def __init__( self: Any, a_: int = 101 ):
'''simple docstring'''
_snake_case : int = length
def __len__( self: Any ):
'''simple docstring'''
return self.length
def __getitem__( self: List[str], a_: List[Any] ):
'''simple docstring'''
return i
class lowercase:
'''simple docstring'''
def __call__( self: int, a_: Dict ):
'''simple docstring'''
return {"input_ids": torch.tensor(a__ ), "labels": torch.tensor(a__ )}
class lowercase( nn.Module ):
'''simple docstring'''
def __init__( self: Optional[int] ):
'''simple docstring'''
super().__init__()
# Add some (unused) params otherwise DDP will complain.
_snake_case : str = nn.Linear(120, 80 )
def UpperCamelCase_ ( self: Optional[Any], a_: int, a_: List[Any]=None ):
'''simple docstring'''
if labels is not None:
return torch.tensor(0.0, device=input_ids.device ), input_ids
else:
return input_ids
class lowercase( __a ):
'''simple docstring'''
@require_torch_neuroncore
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : int = f"--nproc_per_node=2\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
_snake_case : List[str] = self.get_auto_remove_tmp_dir()
_snake_case : Dict = f"--output_dir {output_dir}".split()
_snake_case : Tuple = ["""torchrun"""] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class lowercase( __a ):
'''simple docstring'''
@require_torch_multi_gpu
def UpperCamelCase_ ( self: Dict ):
'''simple docstring'''
_snake_case : Union[str, Any] = f"--nproc_per_node={torch.cuda.device_count()}\n --master_port={get_torch_dist_unique_port()}\n {self.test_file_dir}/test_trainer_distributed.py\n ".split()
_snake_case : str = self.get_auto_remove_tmp_dir()
_snake_case : str = f"--output_dir {output_dir}".split()
_snake_case : Any = ["""torchrun"""] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
A_ = HfArgumentParser((TrainingArguments,))
A_ = parser.parse_args_into_dataclasses()[0]
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '''
F'''distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}'''
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order (this is crucial for prediction, for instance).
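# Illustrative note (an assumption about the setup, not part of the original
# script): with, say, 2 processes and dataset_length = 101, the distributed
# sampler may pad the final batch so each rank sees the same number of
# samples; the Trainer is then expected to gather predictions across ranks
# and truncate them back to the original 101 entries, in order.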
for dataset_length in [1_01, 40, 7]:
A_ = DummyDataset(dataset_length)
def UpperCAmelCase__ (snake_case__ : Any ):
"""simple docstring"""
_snake_case : Tuple = list(range(len(snake_case__ ) ) )
_snake_case : Optional[int] = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"""Predictions and/or labels do not match expected results:\n - predictions: """
F"{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}" )
return {"success": success}
A_ = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
A_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
A_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
A_ = 2
A_ = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
A_ = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
A_ = None
| 360
|
"""simple docstring"""
def solution(limit : int = 1_00_00_00 ):
    """simple docstring"""
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        # phi[i] == i - 1 only holds if i is prime (its value is untouched)
        if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
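# Worked example (for illustration): solution(8) returns 21, since
# phi(2..8) = 1, 2, 2, 4, 2, 6, 4, matching the number of reduced proper
# fractions with denominator <= 8 in Project Euler problem 72.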
if __name__ == "__main__":
print(solution())
| 132
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case_ = logging.get_logger(__name__)
snake_case_ = """▁"""
snake_case_ = {"""vocab_file""": """sentencepiece.bpe.model"""}
snake_case_ = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
snake_case_ = {
"""xlm-roberta-base""": 512,
"""xlm-roberta-large""": 512,
"""xlm-roberta-large-finetuned-conll02-dutch""": 512,
"""xlm-roberta-large-finetuned-conll02-spanish""": 512,
"""xlm-roberta-large-finetuned-conll03-english""": 512,
"""xlm-roberta-large-finetuned-conll03-german""": 512,
}
class A_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCamelCase = VOCAB_FILES_NAMES
__UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self :Optional[Any] , lowercase_ :Optional[int] , lowercase_ :Optional[int]="<s>" , lowercase_ :Optional[Any]="</s>" , lowercase_ :Optional[int]="</s>" , lowercase_ :List[str]="<s>" , lowercase_ :Tuple="<unk>" , lowercase_ :Union[str, Any]="<pad>" , lowercase_ :int="<mask>" , lowercase_ :Optional[Dict[str, Any]] = None , **lowercase_ :Optional[int] , ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it
UpperCAmelCase = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowercase_ , eos_token=lowercase_ , unk_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , **lowercase_ , )
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase_ ) )
UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 tokens
UpperCAmelCase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
UpperCAmelCase = 1
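        # Illustrative: with this offset of 1, the first "real" spm piece ","
        # (spm id 3) maps to 3 + 1 = 4, its position in the fairseq vocab above.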
UpperCAmelCase = len(self.sp_model ) + self.fairseq_offset
UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self :Optional[Any] ) -> List[str]:
UpperCAmelCase = self.__dict__.copy()
UpperCAmelCase = None
UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self :str , lowercase_ :Dict ) -> Dict:
UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCAmelCase = {}
UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCAmelCase__ ( self :Any , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
UpperCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCAmelCase__ ( self :int , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None , lowercase_ :bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
if token_ids_a is None:
return [1] + ([0] * len(lowercase_ )) + [1]
return [1] + ([0] * len(lowercase_ )) + [1, 1] + ([0] * len(lowercase_ )) + [1]
def UpperCAmelCase__ ( self :Optional[Any] , lowercase_ :List[int] , lowercase_ :Optional[List[int]] = None ) -> List[int]:
UpperCAmelCase = [self.sep_token_id]
UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def UpperCAmelCase__ ( self :Optional[int] ) -> Optional[int]:
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def UpperCAmelCase__ ( self :Optional[Any] ) -> Optional[Any]:
UpperCAmelCase = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase__ ( self :int , lowercase_ :str ) -> List[str]:
return self.sp_model.encode(lowercase_ , out_type=lowercase_ )
def UpperCAmelCase__ ( self :Tuple , lowercase_ :Optional[Any] ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
UpperCAmelCase = self.sp_model.PieceToId(lowercase_ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
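        # Illustrative: an out-of-vocabulary piece gets spm id 0, and
        # 0 + fairseq_offset would wrongly yield id 1 ("<pad>"), hence the
        # fall-back to unk_token_id above.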
def UpperCAmelCase__ ( self :Dict , lowercase_ :Any ) -> Dict:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase__ ( self :List[str] , lowercase_ :Union[str, Any] ) -> Optional[Any]:
        UpperCAmelCase = ''.join(lowercase_ ).replace(SPIECE_UNDERLINE , ' ' ).strip()
return out_string
def UpperCAmelCase__ ( self :Dict , lowercase_ :str , lowercase_ :Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(lowercase_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase = os.path.join(
lowercase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase_ , 'wb' ) as fi:
UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (out_vocab_file,)
| 78
|
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance( x , y , max_step ):
    a = x
    b = y
    for step in range(max_step ):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence is guaranteed once the absolute value of the complex
        # number exceeds 2, i.e. once the squared magnitude exceeds 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
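# Illustrative: get_distance(0, 0, 50) returns 1.0, because the origin never
# diverges and the loop runs all 50 steps; points far outside the set break
# out early and return a value close to 0.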
def get_black_and_white_rgb( distance ):
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb( distance ):
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def get_image( image_width = 800 , image_height = 600 , figure_center_x = -0.6 , figure_center_y = 0 , figure_width = 3.2 , max_step = 50 , use_distance_color_coding = True , ):
    img = Image.new('RGB' , (image_width, image_height) )
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x , figure_y , max_step )
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance )
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance )
    return img
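# Illustrative: for an 800x600 image with figure_width = 3.2, the figure
# height is 3.2 / 800 * 600 = 2.4, and pixel (0, 0) maps to the top-left
# figure corner (figure_center_x - 1.6, figure_center_y - 1.2).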
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 78
| 1
|
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
_a : Tuple = logging.get_logger(__name__)
_a : Optional[Any] = {"""vocab_file""": """spiece.model"""}
_a : int = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
_a : Optional[int] = {
"""t5-small""": 5_1_2,
"""t5-base""": 5_1_2,
"""t5-large""": 5_1_2,
"""t5-3b""": 5_1_2,
"""t5-11b""": 5_1_2,
}
_a : Dict = """▁"""
class _UpperCAmelCase ( lowerCAmelCase_ ):
a : int =VOCAB_FILES_NAMES
a : int =PRETRAINED_VOCAB_FILES_MAP
a : Any =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Optional[Any] =["""input_ids""", """attention_mask"""]
def __init__( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE="</s>",__SCREAMING_SNAKE_CASE="<unk>",__SCREAMING_SNAKE_CASE="<pad>",__SCREAMING_SNAKE_CASE=1_00,__SCREAMING_SNAKE_CASE=None,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE=True,**__SCREAMING_SNAKE_CASE,):
'''simple docstring'''
if extra_ids > 0 and additional_special_tokens is None:
__lowerCAmelCase = [f'<extra_id_{i}>' for i in range(__SCREAMING_SNAKE_CASE )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
__lowerCAmelCase = len(set(filter(lambda __SCREAMING_SNAKE_CASE : bool("""extra_id""" in str(__SCREAMING_SNAKE_CASE ) ),__SCREAMING_SNAKE_CASE ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
if legacy:
logger.warning_once(
f'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
""" read the related pull request available at https://github.com/huggingface/transformers/pull/24565""" )
__lowerCAmelCase = legacy
__lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__SCREAMING_SNAKE_CASE,unk_token=__SCREAMING_SNAKE_CASE,pad_token=__SCREAMING_SNAKE_CASE,extra_ids=__SCREAMING_SNAKE_CASE,additional_special_tokens=__SCREAMING_SNAKE_CASE,sp_model_kwargs=self.sp_model_kwargs,legacy=__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE,)
__lowerCAmelCase = vocab_file
__lowerCAmelCase = extra_ids
__lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
@staticmethod
def lowerCamelCase__ ( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
__lowerCAmelCase = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
f' {pretrained_model_name_or_path} automatically truncating your input to'
f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""",__SCREAMING_SNAKE_CASE,)
return max_model_length
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.sp_model.get_piece_size() + self._extra_ids
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None,__SCREAMING_SNAKE_CASE = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE,token_ids_a=__SCREAMING_SNAKE_CASE,already_has_special_tokens=__SCREAMING_SNAKE_CASE )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def lowerCamelCase__ ( self ):
'''simple docstring'''
return list(
set(filter(lambda __SCREAMING_SNAKE_CASE : bool(re.search(R"""<extra_id_\d+>""",__SCREAMING_SNAKE_CASE ) ) is not None,self.additional_special_tokens ) ) )
def lowerCamelCase__ ( self ):
'''simple docstring'''
return [self._convert_token_to_id(__SCREAMING_SNAKE_CASE ) for token in self.get_sentinel_tokens()]
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if len(__SCREAMING_SNAKE_CASE ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
__lowerCAmelCase = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
__lowerCAmelCase = self._add_eos_if_not_present(__SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return token_ids_a
else:
__lowerCAmelCase = self._add_eos_if_not_present(__SCREAMING_SNAKE_CASE )
return token_ids_a + token_ids_a
def __getstate__( self ):
'''simple docstring'''
__lowerCAmelCase = self.__dict__.copy()
__lowerCAmelCase = None
return state
def __setstate__( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = d
# for backward compatibility
if not hasattr(self,"""sp_model_kwargs""" ):
__lowerCAmelCase = {}
__lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not self.legacy:
__lowerCAmelCase = SPIECE_UNDERLINE + text.replace(__SCREAMING_SNAKE_CASE,""" """ )
return super().tokenize(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not self.legacy:
__lowerCAmelCase = text.startswith(__SCREAMING_SNAKE_CASE )
if is_first:
__lowerCAmelCase = text[1:]
__lowerCAmelCase = self.sp_model.encode(__SCREAMING_SNAKE_CASE,out_type=__SCREAMING_SNAKE_CASE )
if not self.legacy and not is_first and not text.startswith(""" """ ) and tokens[0].startswith(__SCREAMING_SNAKE_CASE ):
__lowerCAmelCase = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
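    # Illustrative note: in non-legacy mode the two methods above prepend the
    # SentencePiece underline before encoding and strip a spurious leading
    # "▁" piece from text that follows a special token and does not itself
    # start with a space.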
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if token.startswith("""<extra_id_""" ):
__lowerCAmelCase = re.match(R"""<extra_id_(\d+)>""",__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(__SCREAMING_SNAKE_CASE )
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if index < self.sp_model.get_piece_size():
__lowerCAmelCase = self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = f'<extra_id_{self.vocab_size - 1 - index}>'
return token
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__lowerCAmelCase = []
__lowerCAmelCase = """"""
__lowerCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
__lowerCAmelCase = True
__lowerCAmelCase = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE = None ):
'''simple docstring'''
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__lowerCAmelCase = os.path.join(
__SCREAMING_SNAKE_CASE,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file,__SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE,"""wb""" ) as fi:
__lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 46
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( lowerCAmelCase_ , unittest.TestCase ):
a : Optional[int] =KandinskyVaaImgaImgPipeline
a : List[Any] =["""image_embeds""", """negative_image_embeds""", """image"""]
a : Union[str, Any] =[
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
a : Optional[int] =[
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a : Optional[Any] =False
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 32
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 32
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return 1_00
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = {
"""in_channels""": 4,
            # Out channels is double the in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__lowerCAmelCase = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE )
return model
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__lowerCAmelCase = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = self.dummy_unet
__lowerCAmelCase = self.dummy_movq
__lowerCAmelCase = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
__lowerCAmelCase = DDIMScheduler(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCamelCase__ ( self,__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE=0 ):
'''simple docstring'''
__lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = floats_tensor((1, self.text_embedder_hidden_size),rng=random.Random(seed + 1 ) ).to(
__SCREAMING_SNAKE_CASE )
# create init_image
__lowerCAmelCase = floats_tensor((1, 3, 64, 64),rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = image.cpu().permute(0,2,3,1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(__SCREAMING_SNAKE_CASE ) ).convert("""RGB""" ).resize((2_56, 2_56) )
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__lowerCAmelCase = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = {
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = """cpu"""
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = pipe.to(__SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase = output.images
__lowerCAmelCase = pipe(
**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ),return_dict=__SCREAMING_SNAKE_CASE,)[0]
__lowerCAmelCase = image[0, -3:, -3:, -1]
__lowerCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase = np.array(
[0.619_9778, 0.6398_4406, 0.4614_5785, 0.6294_4984, 0.562_2215, 0.4730_6132, 0.4744_1456, 0.460_7606, 0.4871_9263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase__ ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
__lowerCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
__lowerCAmelCase = """A red cartoon frog, 4k"""
__lowerCAmelCase = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""",torch_dtype=torch.floataa )
pipe_prior.to(__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""",torch_dtype=torch.floataa )
__lowerCAmelCase = pipeline.to(__SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowerCAmelCase , __lowerCAmelCase = pipe_prior(
__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=5,negative_prompt="""""",).to_tuple()
__lowerCAmelCase = pipeline(
image=__SCREAMING_SNAKE_CASE,image_embeds=__SCREAMING_SNAKE_CASE,negative_image_embeds=__SCREAMING_SNAKE_CASE,generator=__SCREAMING_SNAKE_CASE,num_inference_steps=1_00,height=7_68,width=7_68,strength=0.2,output_type="""np""",)
__lowerCAmelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
| 46
| 1
|
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def SCREAMING_SNAKE_CASE( __lowercase="" ) -> str:
A: Tuple = tempfile.mkdtemp()
return os.path.join(__lowercase , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : Tuple ) -> str:
'''simple docstring'''
A: Dict = torch.rand(12 , dtype=torch.floataa ) - 0.5
A: Tuple = AgentAudio(SCREAMING_SNAKE_CASE_ )
A: Union[str, Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , agent_type.to_raw() , atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
# Ensure that the file contains the same value as the original tensor
A , A: Any = sf.read(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , torch.tensor(SCREAMING_SNAKE_CASE_ ) , atol=1E-4 ) )
def _snake_case ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
A: Optional[int] = torch.rand(12 , dtype=torch.floataa ) - 0.5
A: Dict = get_new_path(suffix='''.wav''' )
sf.write(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , 1_60_00 )
A: Union[str, Any] = AgentAudio(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , agent_type.to_raw() , atol=1E-4 ) )
self.assertEqual(agent_type.to_string() , SCREAMING_SNAKE_CASE_ )
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : int ) -> Optional[int]:
'''simple docstring'''
A: int = torch.randint(0 , 2_56 , (64, 64, 3) )
A: Optional[int] = AgentImage(SCREAMING_SNAKE_CASE_ )
A: Dict = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , agent_type._tensor , atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
def _snake_case ( self : int ) -> Optional[int]:
'''simple docstring'''
A: Union[str, Any] = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
A: Dict = Image.open(SCREAMING_SNAKE_CASE_ )
A: int = AgentImage(SCREAMING_SNAKE_CASE_ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
def _snake_case ( self : Optional[Any] ) -> str:
'''simple docstring'''
A: List[str] = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
A: int = Image.open(SCREAMING_SNAKE_CASE_ )
A: Dict = AgentImage(SCREAMING_SNAKE_CASE_ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(SCREAMING_SNAKE_CASE_ ) )
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : str ) -> Optional[Any]:
'''simple docstring'''
A: Union[str, Any] = '''Hey!'''
A: Optional[Any] = AgentText(SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , agent_type.to_string() )
self.assertEqual(SCREAMING_SNAKE_CASE_ , agent_type.to_raw() )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 319
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE( __lowercase ) -> int:
    if not isinstance(__lowercase , int ):
        raise TypeError('''only integers accepted as input''' )
    else:
        num_string = str(abs(__lowercase ) )
        num_transpositions = [list(num_string ) for char in range(len(num_string ) )]
        for index in range(len(num_string ) ):
            num_transpositions[index].pop(index )
        return max(
            int(''''''.join(list(transposition ) ) ) for transposition in num_transpositions )
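# Worked example (for illustration): for input 142 the single-digit removals
# are 42, 12 and 14, so the function returns 42.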
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 319
| 1
|
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : str = DownBlockaD # noqa F405
A : Optional[int] = 'down'
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : Optional[Any] = ResnetDownsampleBlockaD # noqa F405
A : List[Any] = 'down'
def __lowerCamelCase ( self ):
lowercase : Optional[int] = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : List[Any] = AttnDownBlockaD # noqa F405
A : Tuple = 'down'
def __lowerCamelCase ( self ):
lowercase : int = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : Any = CrossAttnDownBlockaD # noqa F405
A : Optional[Any] = 'down'
def __lowerCamelCase ( self ):
lowercase : Dict = super().prepare_init_args_and_inputs_for_common()
lowercase : Dict = 32
return init_dict, inputs_dict
def __lowerCamelCase ( self ):
lowercase : Dict = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : str = SimpleCrossAttnDownBlockaD # noqa F405
A : Union[str, Any] = 'down'
@property
def __lowerCamelCase ( self ):
return super().get_dummy_input(include_encoder_hidden_states=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : int = super().prepare_init_args_and_inputs_for_common()
lowercase : List[str] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def __lowerCamelCase ( self ):
lowercase : Tuple = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : Tuple = SkipDownBlockaD # noqa F405
A : List[str] = 'down'
@property
def __lowerCamelCase ( self ):
return super().get_dummy_input(include_skip_sample=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : List[str] = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : Dict = AttnSkipDownBlockaD # noqa F405
A : Optional[Any] = 'down'
@property
def __lowerCamelCase ( self ):
return super().get_dummy_input(include_skip_sample=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Dict = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : Any = DownEncoderBlockaD # noqa F405
A : Optional[int] = 'down'
@property
def __lowerCamelCase ( self ):
return super().get_dummy_input(include_temb=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : str = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowercase : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : Any = AttnDownEncoderBlockaD # noqa F405
A : List[Any] = 'down'
@property
def __lowerCamelCase ( self ):
return super().get_dummy_input(include_temb=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Optional[int] = {
'''in_channels''': 32,
'''out_channels''': 32,
}
lowercase : Dict = self.dummy_input
return init_dict, inputs_dict
def __lowerCamelCase ( self ):
lowercase : Dict = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : Optional[Any] = UNetMidBlockaD # noqa F405
A : List[Any] = 'mid'
def __lowerCamelCase ( self ):
lowercase : Dict = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
lowercase : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def __lowerCamelCase ( self ):
lowercase : Dict = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : List[str] = UNetMidBlockaDCrossAttn # noqa F405
A : List[str] = 'mid'
def __lowerCamelCase ( self ):
lowercase : str = super().prepare_init_args_and_inputs_for_common()
lowercase : Optional[int] = 32
return init_dict, inputs_dict
def __lowerCamelCase ( self ):
lowercase : List[str] = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : List[Any] = UNetMidBlockaDSimpleCrossAttn # noqa F405
A : Optional[Any] = 'mid'
@property
def __lowerCamelCase ( self ):
return super().get_dummy_input(include_encoder_hidden_states=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : int = super().prepare_init_args_and_inputs_for_common()
lowercase : List[str] = 32
return init_dict, inputs_dict
def __lowerCamelCase ( self ):
lowercase : Tuple = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : Any = UpBlockaD # noqa F405
A : Dict = 'up'
@property
def __lowerCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Dict = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : str = ResnetUpsampleBlockaD # noqa F405
A : List[str] = 'up'
@property
def __lowerCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Any = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : Optional[Any] = CrossAttnUpBlockaD # noqa F405
A : str = 'up'
@property
def __lowerCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : List[str] = super().prepare_init_args_and_inputs_for_common()
lowercase : Dict = 32
return init_dict, inputs_dict
def __lowerCamelCase ( self ):
lowercase : Tuple = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : Tuple = SimpleCrossAttnUpBlockaD # noqa F405
A : Dict = 'up'
@property
def __lowerCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE__ , include_encoder_hidden_states=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : str = super().prepare_init_args_and_inputs_for_common()
lowercase : Any = 32
return init_dict, inputs_dict
def __lowerCamelCase ( self ):
lowercase : List[Any] = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : Union[str, Any] = AttnUpBlockaD # noqa F405
A : Optional[int] = 'up'
@property
def __lowerCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE__ )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def __lowerCamelCase ( self ):
lowercase : Tuple = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : List[Any] = SkipUpBlockaD # noqa F405
A : Optional[int] = 'up'
@property
def __lowerCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Union[str, Any] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : List[Any] = AttnSkipUpBlockaD # noqa F405
A : str = 'up'
@property
def __lowerCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : List[Any] = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : Optional[Any] = UpDecoderBlockaD # noqa F405
A : Dict = 'up'
@property
def __lowerCamelCase ( self ):
return super().get_dummy_input(include_temb=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : str = {'''in_channels''': 32, '''out_channels''': 32}
lowercase : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def __lowerCamelCase ( self ):
lowercase : str = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(SCREAMING_SNAKE_CASE__ )
class __SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ):
A : Optional[int] = AttnUpDecoderBlockaD # noqa F405
A : str = 'up'
@property
def __lowerCamelCase ( self ):
return super().get_dummy_input(include_temb=SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase ( self ):
lowercase : Any = {'''in_channels''': 32, '''out_channels''': 32}
lowercase : str = self.dummy_input
return init_dict, inputs_dict
def __lowerCamelCase ( self ):
lowercase : Tuple = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(SCREAMING_SNAKE_CASE__ )
| 365
|
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Union[str, Any]:
"""simple docstring"""
lowercase : Any = os.path.abspath(_UpperCamelCase )
logger.info(f"""Converting TensorFlow checkpoint from {tf_path}""" )
# Load weights from TF model
lowercase : Optional[int] = tf.train.list_variables(_UpperCamelCase )
lowercase : Optional[int] = []
lowercase : Optional[int] = []
lowercase : Optional[Any] = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
lowercase : int = full_name.split('''/''' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f"""Skipping non-model layer {full_name}""" )
continue
if "optimizer" in full_name:
logger.info(f"""Skipping optimization layer {full_name}""" )
continue
if name[0] == "model":
# ignore initial 'model'
lowercase : List[Any] = name[1:]
# figure out how many levels deep the name is
lowercase : Optional[int] = 0
for _name in name:
if _name.startswith('''layer_with_weights''' ):
depth += 1
else:
break
layer_depth.append(_UpperCamelCase )
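        # Illustrative: a TF2 object-graph name such as
        # "model/layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUE"
        # has depth 1, since exactly one path component starts with
        # "layer_with_weights".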
# read data
lowercase : Any = tf.train.load_variable(_UpperCamelCase, _UpperCamelCase )
names.append('''/'''.join(_UpperCamelCase ) )
arrays.append(_UpperCamelCase )
logger.info(f"""Read a total of {len(_UpperCamelCase ):,} layers""" )
# Sanity check
if len(set(_UpperCamelCase ) ) != 1:
raise ValueError(f"""Found layer names with different depths (layer depth {list(set(_UpperCamelCase ) )})""" )
lowercase : List[str] = list(set(_UpperCamelCase ) )[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
for full_name, array in zip(_UpperCamelCase, _UpperCamelCase ):
lowercase : Optional[int] = full_name.split('''/''' )
lowercase : Tuple = model
lowercase : Any = []
for i, m_name in enumerate(_UpperCamelCase ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith('''layer_with_weights''' ):
lowercase : Tuple = int(m_name.split('''-''' )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(['''embeddings''', '''LayerNorm'''] )
lowercase : int = getattr(_UpperCamelCase, '''embeddings''' )
lowercase : Optional[Any] = getattr(_UpperCamelCase, '''LayerNorm''' )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(['''encoder''', '''layer''', str(layer_num - 4 )] )
lowercase : int = getattr(_UpperCamelCase, '''encoder''' )
lowercase : Tuple = getattr(_UpperCamelCase, '''layer''' )
lowercase : List[Any] = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(['''pooler''', '''dense'''] )
lowercase : str = getattr(_UpperCamelCase, '''pooler''' )
lowercase : str = getattr(_UpperCamelCase, '''dense''' )
elif m_name == "embeddings":
trace.append('''embeddings''' )
lowercase : Optional[int] = getattr(_UpperCamelCase, '''embeddings''' )
if layer_num == 0:
trace.append('''word_embeddings''' )
lowercase : str = getattr(_UpperCamelCase, '''word_embeddings''' )
elif layer_num == 1:
trace.append('''position_embeddings''' )
lowercase : List[Any] = getattr(_UpperCamelCase, '''position_embeddings''' )
elif layer_num == 2:
trace.append('''token_type_embeddings''' )
lowercase : int = getattr(_UpperCamelCase, '''token_type_embeddings''' )
else:
raise ValueError(f"""Unknown embedding layer with name {full_name}""" )
trace.append('''weight''' )
lowercase : Union[str, Any] = getattr(_UpperCamelCase, '''weight''' )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(['''attention''', '''self'''] )
lowercase : Tuple = getattr(_UpperCamelCase, '''attention''' )
lowercase : str = getattr(_UpperCamelCase, '''self''' )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(['''attention''', '''output''', '''LayerNorm'''] )
lowercase : Dict = getattr(_UpperCamelCase, '''attention''' )
lowercase : Any = getattr(_UpperCamelCase, '''output''' )
lowercase : Union[str, Any] = getattr(_UpperCamelCase, '''LayerNorm''' )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(['''attention''', '''output''', '''dense'''] )
lowercase : Union[str, Any] = getattr(_UpperCamelCase, '''attention''' )
lowercase : str = getattr(_UpperCamelCase, '''output''' )
lowercase : Optional[int] = getattr(_UpperCamelCase, '''dense''' )
elif m_name == "_output_dense":
# output dense
trace.extend(['''output''', '''dense'''] )
lowercase : List[str] = getattr(_UpperCamelCase, '''output''' )
lowercase : str = getattr(_UpperCamelCase, '''dense''' )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(['''output''', '''LayerNorm'''] )
lowercase : Dict = getattr(_UpperCamelCase, '''output''' )
lowercase : int = getattr(_UpperCamelCase, '''LayerNorm''' )
elif m_name == "_key_dense":
# attention key
trace.append('''key''' )
lowercase : Optional[Any] = getattr(_UpperCamelCase, '''key''' )
elif m_name == "_query_dense":
# attention query
trace.append('''query''' )
lowercase : Dict = getattr(_UpperCamelCase, '''query''' )
elif m_name == "_value_dense":
# attention value
trace.append('''value''' )
lowercase : Optional[Any] = getattr(_UpperCamelCase, '''value''' )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(['''intermediate''', '''dense'''] )
lowercase : List[str] = getattr(_UpperCamelCase, '''intermediate''' )
lowercase : Optional[int] = getattr(_UpperCamelCase, '''dense''' )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append('''output''' )
lowercase : Tuple = getattr(_UpperCamelCase, '''output''' )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append('''bias''' )
lowercase : str = getattr(_UpperCamelCase, '''bias''' )
elif m_name in ["kernel", "gamma"]:
trace.append('''weight''' )
lowercase : Dict = getattr(_UpperCamelCase, '''weight''' )
else:
logger.warning(f"""Ignored {m_name}""" )
# for certain layers reshape is necessary
lowercase : Any = '''.'''.join(_UpperCamelCase )
if re.match(R'''(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)''', _UpperCamelCase ) or re.match(
R'''(\S+)\.attention\.output\.dense\.weight''', _UpperCamelCase ):
lowercase : Any = array.reshape(pointer.data.shape )
if "kernel" in full_name:
lowercase : List[str] = array.transpose()
if pointer.shape == array.shape:
lowercase : Optional[Any] = torch.from_numpy(_UpperCamelCase )
else:
raise ValueError(
f"""Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"""
f""" {array.shape}""" )
logger.info(f"""Successfully set variable {full_name} to PyTorch layer {trace}""" )
return model
def __lowercase ( _UpperCamelCase, _UpperCamelCase, _UpperCamelCase ) ->Tuple:
"""simple docstring"""
logger.info(f"""Loading model based on config from {config_path}...""" )
lowercase : List[Any] = BertConfig.from_json_file(_UpperCamelCase )
lowercase : Dict = BertModel(_UpperCamelCase )
# Load weights from checkpoint
logger.info(f"""Loading weights from checkpoint {tf_checkpoint_path}...""" )
load_tfa_weights_in_bert(_UpperCamelCase, _UpperCamelCase, _UpperCamelCase )
# Save pytorch-model
logger.info(f"""Saving PyTorch model to {pytorch_dump_path}...""" )
torch.save(model.state_dict(), _UpperCamelCase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
__a = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
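# Illustrative invocation (the flags are taken verbatim from the argparse setup above;
# the file paths are hypothetical placeholders):
#
#   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf2_checkpoint/bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin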
"""simple docstring"""
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"


@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
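# Typical usage sketch: the `accelerate config` CLI routes through these parsers,
# so the interactive prompts can be saved to an explicit location (path illustrative):
#
#   accelerate config --config_file ~/.cache/huggingface/accelerate/default_config.yaml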
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """Logistic sigmoid: maps any real value into (0, 1)."""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """Binary cross-entropy loss."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70_000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


# In[68]:

if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
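# Quick sanity checks of the helpers above (hand-verified values, left as comments so
# the plotting script's behaviour is unchanged):
#   sigmoid_function(0.0)                          -> 0.5
#   cost_function(np.array([0.5]), np.array([1]))  -> ~0.6931, i.e. -log(0.5)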
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DiffusionPipeline,
    EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            attention_head_dim=(2, 4),
            use_linear_projection=True,
            addition_embed_type="text_time",
            addition_time_embed_dim=8,
            transformer_layers_per_block=(1, 2),
            projection_class_embeddings_input_dim=80,
            cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            steps_offset=1,
            beta_schedule="scaled_linear",
            timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs

    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass

    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4


@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: str = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ) -> None:
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
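# Illustrative instantiation (values chosen for the example, not library defaults):
#
#   config = TimeSeriesTransformerConfig(prediction_length=24, context_length=48, num_time_features=2)
#   config.feature_size  # input_size * len(lags_sequence) plus the embedding/time/static feature dims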
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput(BaseOutput):
    """Output class for the Semantic Stable Diffusion pipeline."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_content_detected: Optional[List[bool]]


if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)


def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layernorm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
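# Worked example for the helpers above (hand-checked):
#   rename_key("down_blocks.0.attentions.0.proj.weight")
#       -> "down_blocks_0.attentions_0.proj.weight"   (each "<name>.<digit>" becomes "<name>_<digit>")
#   rename_key_and_reshape_tensor then re-keys a 2D "weight" to "kernel" and transposes it,
#   matching Flax's Dense layout.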
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
logger = logging.get_logger(__name__)

IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "openai/imagegpt-small": "",
    "openai/imagegpt-medium": "",
    "openai/imagegpt-large": "",
}


class ImageGPTConfig(PretrainedConfig):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=512 + 1,  # add one for the start-of-sentence (sos) token
        n_positions=32 * 32,
        n_embd=512,
        n_layer=24,
        n_head=8,
        n_inner=None,
        activation_function="quick_gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        tie_word_embeddings=False,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings

        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)


class ImageGPTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
            ]
        )

    def generate_dummy_inputs(
        self,
        preprocessor: "FeatureExtractionMixin",
        batch_size: int = 1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 32,
        image_height: int = 32,
    ) -> Mapping[str, Any]:
        """Generate dummy inputs to provide to the ONNX exporter for the given framework."""
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
    from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bloom_fast import BloomTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
            BloomPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
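# Effect of the lazy module (illustrative note): importing `transformers.models.bloom` stays
# cheap, and the torch-backed classes listed in `_import_structure` are only imported on first
# attribute access, e.g. `from transformers.models.bloom import BloomForCausalLM`.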
"""simple docstring"""
# fmt: off
MORSE_CODE_DICT = {
    "A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
    "H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
    "O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
    "V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
    "2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
    "8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
    ":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", '"': ".-..-.",
    "?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
    "(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
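# Round-trip example, hand-checked against the table above:
#   encrypt("SOS")          -> "... --- ..."
#   decrypt("... --- ...")  -> "SOS"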
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]


@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
            "sep": self.sep,
            "header": self.header,
            "names": self.names,
            "index_col": self.index_col,
            "usecols": self.usecols,
            "prefix": self.prefix,
            "mangle_dupe_cols": self.mangle_dupe_cols,
            "engine": self.engine,
            "converters": self.converters,
            "true_values": self.true_values,
            "false_values": self.false_values,
            "skipinitialspace": self.skipinitialspace,
            "skiprows": self.skiprows,
            "nrows": self.nrows,
            "na_values": self.na_values,
            "keep_default_na": self.keep_default_na,
            "na_filter": self.na_filter,
            "verbose": self.verbose,
            "skip_blank_lines": self.skip_blank_lines,
            "thousands": self.thousands,
            "decimal": self.decimal,
            "lineterminator": self.lineterminator,
            "quotechar": self.quotechar,
            "quoting": self.quoting,
            "escapechar": self.escapechar,
            "comment": self.comment,
            "encoding": self.encoding,
            "dialect": self.dialect,
            "error_bad_lines": self.error_bad_lines,
            "warn_bad_lines": self.warn_bad_lines,
            "skipfooter": self.skipfooter,
            "doublequote": self.doublequote,
            "memory_map": self.memory_map,
            "float_precision": self.float_precision,
            "chunksize": self.chunksize,
            "encoding_errors": self.encoding_errors,
            "on_bad_lines": self.on_bad_lines,
            "date_format": self.date_format,
        }

        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs


class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
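# End-user sketch (this builder backs the packaged "csv" loader; the file name is illustrative):
#
#   import datasets
#   ds = datasets.load_dataset("csv", data_files="data.csv", sep=";")
#   # CsvConfig fields such as `sep` are forwarded to pandas.read_csv via pd_read_csv_kwargs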
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """
    Releases memory from `objects` by setting them to `None`, then collects garbage and
    empties the device cache. Returned objects should be reassigned to the same variables.
    """
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception):
    """Checks whether `exception` relates to CUDA, cuDNN, or CPU out-of-memory conditions."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function=None, starting_batch_size=128):
    """
    Decorator that retries `function` with half the batch size each time it fails with an
    out-of-memory condition. `function` must take `batch_size` as its first argument.
    """
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
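# Typical usage (pattern from the accelerate documentation; `train`, `model`, and
# `optimizer` are placeholders):
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size, model, optimizer):
#       ...  # retried at 128, 64, 32, ... whenever an OOM-style error is raised
#
#   train(model, optimizer)  # batch_size is injected by the decorator, not passed by the caller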
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    """Return the bitwise OR of two non-negative integers as a binary string."""
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int("1" in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
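# Hand-checked examples:
#   binary_or(25, 32)  -> "0b111001"   (0b011001 | 0b100000)
#   binary_or(5, 3)    -> "0b111"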
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCAmelCase_ ( _a , _a , _a ):
"""simple docstring"""
lowercase = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self : Optional[int] , snake_case_ : int , snake_case_ : int , snake_case_ : Optional[int] = None , snake_case_ : int = 50_257 , snake_case_ : int = 1_024 , snake_case_ : int = 768 , snake_case_ : int = 12 , snake_case_ : int = 12 , snake_case_ : Optional[int] = None , snake_case_ : str = "gelu_new" , snake_case_ : float = 0.1 , snake_case_ : float = 0.1 , snake_case_ : float = 0.1 , snake_case_ : float = 1E-5 , snake_case_ : float = 0.02 , snake_case_ : bool = True , snake_case_ : bool = True , snake_case_ : bool = False , snake_case_ : bool = False , ):
super().__init__()
snake_case__ : Optional[int] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
f" `n_embd`: {n_embd} are not equal." )
snake_case__ : Dict = prefix_inner_dim
snake_case__ : Tuple = prefix_hidden_dim
snake_case__ : Optional[Any] = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
snake_case__ : List[str] = (
nn.Linear(self.prefix_hidden_dim , snake_case_ ) if self.prefix_hidden_dim is not None else nn.Identity()
)
snake_case__ : Union[str, Any] = GPTaConfig(
vocab_size=snake_case_ , n_positions=snake_case_ , n_embd=snake_case_ , n_layer=snake_case_ , n_head=snake_case_ , n_inner=snake_case_ , activation_function=snake_case_ , resid_pdrop=snake_case_ , embd_pdrop=snake_case_ , attn_pdrop=snake_case_ , layer_norm_epsilon=snake_case_ , initializer_range=snake_case_ , scale_attn_weights=snake_case_ , use_cache=snake_case_ , scale_attn_by_inverse_layer_idx=snake_case_ , reorder_and_upcast_attn=snake_case_ , )
snake_case__ : Dict = GPTaLMHeadModel(snake_case_ )
def lowerCamelCase ( self : Optional[int] , snake_case_ : torch.Tensor , snake_case_ : torch.Tensor , snake_case_ : Optional[torch.Tensor] = None , snake_case_ : Optional[torch.Tensor] = None , ):
snake_case__ : Optional[Any] = self.transformer.transformer.wte(snake_case_ )
snake_case__ : Tuple = self.encode_prefix(snake_case_ )
snake_case__ : Any = self.decode_prefix(snake_case_ )
snake_case__ : Optional[Any] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
snake_case__ : Any = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
snake_case__ : int = torch.cat((dummy_token, input_ids) , dim=1 )
snake_case__ : str = self.transformer(inputs_embeds=snake_case_ , labels=snake_case_ , attention_mask=snake_case_ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def lowerCamelCase ( self : Optional[Any] , snake_case_ : int , snake_case_ : torch.device ):
return torch.zeros(snake_case_ , self.prefix_length , dtype=torch.intaa , device=snake_case_ )
    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_ids=None,
        input_embeds=None,
        device=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                # First step: seed one beam per top-k token.
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                # Later steps: rank all (beam, token) continuations at once.
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
'''simple docstring'''
def multiplicative_persistence(num: int) -> int:
    """Count how many times the digits of ``num`` must be multiplied together
    before a single digit remains."""
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1
    return steps
def additive_persistence(num: int) -> int:
    """Count how many times the digits of ``num`` must be summed before a
    single digit remains."""
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1
    return steps
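# Quick sanity checks (hand-computed): 217 -> 14 -> 4 takes two multiplicative
# steps, and 199 -> 19 -> 10 -> 1 takes three additive steps.
assert multiplicative_persistence(217) == 2
assert additive_persistence(199) == 3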
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    """Logistic (sigmoid) activation, mapping any real number into (0, 1)."""
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    """Binary cross-entropy loss for predicted probabilities h and labels y."""
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
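# Worked single-step example (hypothetical numbers): for one sample x = [1, 2]
# with y = 1 and theta = [0, 0], we get h = sigmoid(0) = 0.5, the gradient is
# [1, 2] * (0.5 - 1) = [-0.5, -1], and with alpha = 0.1 the updated weights
# become theta = [0.05, 0.1].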
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta i.e our weights vector
    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm
    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
'''simple docstring'''
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
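# Sanity check: inputs shorter than size_threshold fall straight through to
# insertion sort, since end - start <= 16 on the first loop test.
assert sort([4, 1, 3, 9, 7]) == [1, 3, 4, 7, 9]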
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
def add(first: int, second: int) -> int:
    """Add two non-negative integers using only bitwise operations."""
    while second != 0:
        carry = first & second  # common set bits give the carry
        first ^= second  # sum without the carry
        second = carry << 1  # shift the carry into position
    return first
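# Hand-traced example: add(13, 7) iterates
#   carry = 5,  first = 10, second = 10
#   carry = 10, first = 0,  second = 20
#   carry = 0,  first = 20, second = 0   -> returns 20
assert add(13, 7) == 20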
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
print(F'''{add(first, second) = }''')
def naive_pattern_search(s: str, pattern: str) -> list:
    """Return every index at which ``pattern`` occurs in ``s``."""
    pat_len = len(pattern)
    position = []
    for i in range(len(s) - pat_len + 1):
        match_found = True
        for j in range(pat_len):
            if s[i + j] != pattern[j]:
                match_found = False
                break
        if match_found:
            position.append(i)
    return position
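# Every alignment of the pattern is checked explicitly, so the worst-case cost
# is O(len(s) * len(pattern)); KMP or the Z-algorithm would make this linear.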
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        vocab_tokens = [
'<unk>',
'[CLS]',
'[SEP]',
'want',
'unwanted',
'wa',
'un',
'running',
',',
'low',
'l',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        kwargs['lower_case'] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = '<unk> UNwanted , running'
        output_text = '<unk> unwanted, running'
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)
        tokens = tokenizer.tokenize('<unk> UNwanted , running')
        self.assertListEqual(tokens, ['<unk>', 'unwanted', ',', 'running'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])
    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ?  '), ['hello', '!', 'how', 'are', 'you', '?'])
    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo ! how \n Are yoU ?  '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])
    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = 'Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'
        tokens_out = [
'Hello',
'(',
'bracket',
')',
'and',
'side',
'@-@',
'scrolled',
'[',
'and',
']',
'Henry',
'\'s',
'$',
'5',
'@,@',
'000',
'with',
'3',
'@.@',
'34',
'm',
'.',
'What',
'\'s',
'up',
'!',
'?',
]
        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)
    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)
        tokenizer.add_tokens(['new1', 'new2'])
        tokenizer.move_added_token('new1', 1)
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode('new1'), [1])
        self.assertEqual(tokenizer.decode([1]), 'new1')
"""simple docstring"""
def gcd(a: int, b: int) -> int:
    '''Euclidean algorithm for the greatest common divisor.'''
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    '''Return the modular multiplicative inverse of ``a`` modulo ``m`` using
    the extended Euclidean algorithm.'''
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
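# Spot checks (hand-verified): gcd(24, 40) == 8, and find_mod_inverse(7, 26)
# is 15 because 7 * 15 = 105 = 4 * 26 + 1.
assert gcd(24, 40) == 8
assert find_mod_inverse(7, 26) == 15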
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
_CITATION = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    '''Return the sum of the digits of num factorial.'''
    return sum(map(int, str(factorial(num))))
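# Worked example: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.
assert solution(10) == 27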
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128,
                 pad_token_id=0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
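# Usage sketch (hypothetical values): RetriBertConfig(projection_dim=256) keeps
# the BERT-style encoder defaults above but projects pooled outputs to 256 dims.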
"""simple docstring"""
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('surface_area_cube() only accepts non-negative values')
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError('surface_area_cuboid() only accepts non-negative values')
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('surface_area_sphere() only accepts non-negative values')
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError('surface_area_hemisphere() only accepts non-negative values')
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cone() only accepts non-negative values')
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError('surface_area_conical_frustum() only accepts non-negative values')
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError('surface_area_cylinder() only accepts non-negative values')
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError('surface_area_torus() only accepts non-negative values')
    if torus_radius < tube_radius:
        raise ValueError('surface_area_torus() does not support spindle or self intersecting tori')
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError('area_rectangle() only accepts non-negative values')
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError('area_square() only accepts non-negative values')
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_triangle() only accepts non-negative values')
    return (base * height) / 2


def area_triangle_three_sides(side_1: float, side_2: float, side_3: float) -> float:
    """Heron's formula, with a triangle-inequality check on the inputs."""
    if side_1 < 0 or side_2 < 0 or side_3 < 0:
        raise ValueError('area_triangle_three_sides() only accepts non-negative values')
    elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
        raise ValueError('Given three sides do not form a triangle')
    semi_perimeter = (side_1 + side_2 + side_3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side_1)
        * (semi_perimeter - side_2)
        * (semi_perimeter - side_3)
    )
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError('area_parallelogram() only accepts non-negative values')
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError('area_trapezium() only accepts non-negative values')
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError('area_circle() only accepts non-negative values')
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError('area_ellipse() only accepts non-negative values')
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError('area_rhombus() only accepts non-negative values')
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            'area_reg_polygon() only accepts integers greater than or equal to three as number of sides')
    elif length < 0:
        raise ValueError(
            'area_reg_polygon() only accepts non-negative values as length of a side')
    return (sides * length**2) / (4 * tan(pi / sides))
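# Spot checks for the definitions above (hand-verified): a cube of side 2 has
# six faces of area 4, and Heron's formula on the 3-4-5 right triangle gives 6.
assert surface_area_cube(2) == 24
assert area_triangle_three_sides(3, 4, 5) == 6.0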
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f"Rectangle: {area_rectangle(10, 20) = }")
print(f"Square: {area_square(10) = }")
print(f"Triangle: {area_triangle(10, 10) = }")
print(f"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(f"Parallelogram: {area_parallelogram(10, 20) = }")
print(f"Rhombus: {area_rhombus(10, 20) = }")
print(f"Trapezium: {area_trapezium(10, 20, 30) = }")
print(f"Circle: {area_circle(20) = }")
print(f"Ellipse: {area_ellipse(10, 20) = }")
print('\nSurface Areas of various geometric shapes: \n')
print(f"Cube: {surface_area_cube(20) = }")
print(f"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(f"Sphere: {surface_area_sphere(20) = }")
print(f"Hemisphere: {surface_area_hemisphere(20) = }")
print(f"Cone: {surface_area_cone(10, 20) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(f"Cylinder: {surface_area_cylinder(10, 20) = }")
print(f"Torus: {surface_area_torus(20, 10) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(f"Square: {area_reg_polygon(4, 10) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
@require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )
        top_1 = [{"label": "LABEL_0", "score": 0.504}]
        top_2 = [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), top_1)
        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(nested_simplify(outputs), top_2)
        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(nested_simplify(outputs), [top_2, top_2])
        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), top_1)

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), top_1)
        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(nested_simplify(outputs), [top_2])
        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(nested_simplify(outputs), [top_2, top_2])
        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [top_1[0], top_1[0]])
@require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
@require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
@slow
@require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
@slow
@require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]
    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        any_label_score = {"label": ANY(str), "score": ANY(float)}

        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)
        self.assertEqual(nested_simplify(outputs), [any_label_score])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(nested_simplify(outputs), [any_label_score, any_label_score])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(nested_simplify(outputs), [[any_label_score] * N, [any_label_score] * N])

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(nested_simplify(outputs), any_label_score)
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(nested_simplify(outputs), [any_label_score])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price including tax."""
    return price * (1 + tax_rate)
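# Worked example: price_plus_tax(100, 0.25) == 100 * 1.25 == 125.0.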
if __name__ == "__main__":
print(F"""{price_plus_tax(100, 0.2_5) = }""")
print(F"""{price_plus_tax(1_2_5.5_0, 0.0_5) = }""")
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    # `task` is kept in the asdict output so the template serializes to JSON
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
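# Usage sketch (assuming the TaskTemplate base consumes the mapping as below):
# LanguageModeling(text_column="content").column_mapping == {"content": "text"},
# which lets `datasets` rename columns when preparing data for LM training.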
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size_divisor=32, resample=PILImageResampling.BILINEAR, do_rescale=True, **kwargs) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image, size_divisor, resample, data_format=None, **kwargs) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        do_rescale=None,
        size_divisor=None,
        resample=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError('size_divisor is required for resizing')

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError('Invalid image(s)')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
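# Usage sketch (assuming a PIL image `img` is available): calling
#   GLPNImageProcessor()(img, return_tensors="pt")["pixel_values"]
# yields a float tensor whose height and width were rounded down to
# multiples of 32 and whose values were scaled by 1/255.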
import warnings
from functools import wraps
from typing import Callable
def experimental(fn: Callable) -> Callable:
    # The warning category below is an assumption; the original argument was
    # lost in obfuscation.
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
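# Usage sketch (the decorated name below is hypothetical):
#
#   @experimental
#   def new_api():
#       ...
#
# Calling new_api() emits the warning and then delegates to the wrapped function.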
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('''''', '''|''', '''|'''),
datarow=DataRow('''''', '''|''', '''|'''),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": f'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''',
"""emoji""": True,
},
}
]
total_num_failed = 0
for log in Path().glob('''*.log'''):
    section_num_failed = 0
with open(log, '''r''') as f:
for line in f:
            line = json.loads(line)
if line.get('''nodeid''', '''''') != "":
                test = line["nodeid"]
if line.get('''duration''', None) is not None:
                    duration = f'{line["duration"]:.4f}'
if line.get('''outcome''', '''''') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('''_''')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
    failed = []
log.unlink()
a__ = """"""
a__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
# Count number of instances in failed_tests
            table = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=['''Test Location''', '''Num Failed'''],
tablefmt=hf_table_format,
stralign='''right''',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
a__ = """Too many failed tests, please see the full report in the Action results."""
a__ = len(err) + 10
a__ = message[: 3000 - offset] + f'''\n...\n```\n{err}'''
print(f'''### {message}''')
else:
a__ = """No failed tests! 🤗"""
print(f'''## {message}''')
payload.append(no_error_payload)
if os.environ.get('''TEST_TYPE''', '''''') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
        md_report = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
        action_button = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": f'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
payload.append(action_button)
    date_report = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": f'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''',
}
],
}
payload.append(date_report)
    response = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
    ts = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
a__ = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": f'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''',
},
}
client.chat_postMessage(
channel='''#accelerate-ci-daily''',
thread_ts=ts,
blocks=[payload],
)
"""simple docstring"""
def add(first: int, second: int) -> int:
    """Addition of non-negative integers using only bitwise operators."""
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("""Enter the first number: """).strip())
    second = int(input("""Enter the second number: """).strip())
print(F'{add(first, second) = }')
class Node:
    def __init__(self, data: int, previous=None, next_node=None):
        self.data = data
        self.previous = previous
        self.next = next_node

    def __str__(self) -> str:
        return f"{self.data}"

    def get_data(self) -> int:
        return self.data

    def get_next(self):
        return self.next

    def get_previous(self):
        return self.previous


class LinkedListIterator:
    def __init__(self, head):
        self.current = head

    def __iter__(self):
        return self

    def __next__(self):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value


class LinkedList:
    def __init__(self):
        self.head = None  # First node in list
        self.tail = None  # Last node in list

    def __str__(self):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data())
            current = current.get_next()
        return " ".join(str(node) for node in nodes)

    def __contains__(self, value: int):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False

    def __iter__(self):
        return LinkedListIterator(self.head)

    def get_head_data(self):
        if self.head:
            return self.head.get_data()
        return None

    def get_tail_data(self):
        if self.tail:
            return self.tail.get_data()
        return None

    def set_head(self, node: Node) -> None:
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head, node)

    def set_tail(self, node: Node) -> None:
        if self.head is None:
            self.set_head(node)
        else:
            self.insert_after_node(self.tail, node)

    def insert(self, value: int) -> None:
        node = Node(value)
        if self.head is None:
            self.set_head(node)
        else:
            self.set_tail(node)

    def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.next = node
        node_to_insert.previous = node.previous

        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert

        node.previous = node_to_insert

    def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
        node_to_insert.previous = node
        node_to_insert.next = node.next

        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert

        node.next = node_to_insert

    def insert_at_position(self, position: int, value: int) -> None:
        current_position = 1
        new_node = Node(value)
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node, new_node)
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail, new_node)

    def get_node(self, item: int) -> Node:
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception("""Node not found""")

    def delete_value(self, value) -> None:
        if (node := self.get_node(value)) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node)

    @staticmethod
    def remove_node_pointers(node: Node) -> None:
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.previous = None
        node.next = None

    def is_empty(self):
        return self.head is None
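# Usage sketch (hand-traced against the methods above):
#   linked_list = LinkedList()
#   linked_list.insert(1)
#   linked_list.insert(2)
#   linked_list.insert_at_position(1, 0)
#   str(linked_list)  # "0 1 2"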
def create_linked_list() -> None:
    """Demo helper kept from the original file; its name is assumed and its
    doctest body was stripped in this dump."""
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
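# Note: only the VAE weights are converted; the UNet and text encoder in the
# original checkpoint are ignored, so the dump path ends up holding a
# standalone AutoencoderKL in the diffusers layout.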
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
"""simple docstring"""
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
_CITATION = r'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
"""simple docstring"""
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2  # indentation used when dumping the converted JSON files
class Dictionary:
    """A mapping from symbols to consecutive integers (from fairseq)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)

    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Returns the number of symbols in the dictionary"""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary"""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0

    def add_from_file(self, f):
        """Adds words from a text file."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
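# Worked example of the key rewriting above (hypothetical BPE counts; the input must
# contain all four special tokens, since the loop above deletes and restores each one):
#
#   d = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "er": 7}
#   rewrite_dict_keys(d)
#   -> {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le": 5, "er</w>": 7}
#
# "le@@" loses the word-breaking suffix and "er" gains the word-ending marker "</w>".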
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
        else:
            model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
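    # Example invocation (hypothetical script name and paths, shown for illustration):
    #
    #   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
    #       --biogpt_checkpoint_path ./Pre-trained-BioGPT \
    #       --pytorch_dump_folder_path ./biogpt-converted
    #
    # The checkpoint directory is expected to contain checkpoint.pt, dict.txt and bpecodes.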
| 17
| 1
|
'''simple docstring'''
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """
    Returns the least value of n for which the fill-count function first exceeds
    one million, given the minimum block length.
    """
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5
|
'''simple docstring'''
def solution(min_total: int = 10**12) -> int:
    """
    Returns the number of blue discs for the first arrangement to contain
    over min_total discs in total.
    """
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2
if __name__ == "__main__":
print(f'''{solution() = }''')
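# Sanity check on a small case: the first valid arrangement is 3 blue discs out of 4
# in total, since P(two blue) = 3/4 * 2/3 = 1/2, so solution(2) returns 3.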
| 5
| 1
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 152
|
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval", **gen_kwargs):
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size))
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size))
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
| 148
| 0
|
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModel4XPU
from .scripts import test_script, test_sync, test_ops # isort: skip
| 355
|
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class LoggingTest(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 113
| 0
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
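# A minimal construction sketch (illustrative values; every unspecified argument keeps
# the defaults above):
#
#   config = InformerConfig(prediction_length=24, context_length=48)
#   assert config.hidden_size == config.d_model  # resolved through `attribute_map`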
| 176
|
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
SCREAMING_SNAKE_CASE__ = {'input_ids': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE__, model_name="microsoft/deberta-v2-xlarge", revision="ad6e42c1532ddf3a15c39246b63f5559d558b670"
        )
| 176
| 1
|
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 134
|
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
_snake_case : Union[str, Any] = ["small", "medium", "large"]
_snake_case : List[Any] = "lm_head.decoder.weight"
_snake_case : Optional[Any] = "lm_head.weight"
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
__snake_case : Tuple = torch.load(__lowerCamelCase )
__snake_case : Dict = d.pop(__lowerCamelCase )
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
torch.save(__lowerCamelCase , os.path.join(__lowerCamelCase , __lowerCamelCase ) )
if __name__ == "__main__":
_snake_case : Dict = argparse.ArgumentParser()
parser.add_argument("--dialogpt_path", default=".", type=str)
_snake_case : Any = parser.parse_args()
for MODEL in DIALOGPT_MODELS:
_snake_case : Dict = os.path.join(args.dialogpt_path, f'''{MODEL}_ft.pkl''')
_snake_case : List[str] = f'''./DialoGPT-{MODEL}'''
convert_dialogpt_checkpoint(
checkpoint_path,
pytorch_dump_folder_path,
)
| 134
| 1
|
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedy fractional knapsack: sorts items by value/weight ratio, takes whole items
    while they fit, then fills the remaining capacity with a fraction of the next item."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
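# Worked example (a classic instance, chosen here for illustration): with values
# [60, 100, 120], weights [10, 20, 30] and capacity 50, the greedy scan takes items
# 0 and 1 whole and 2/3 of item 2:
#
#   >>> fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
#   (240.0, [1, 1, 0.6666666666666666])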
if __name__ == "__main__":
import doctest
doctest.testmod()
| 204
|
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)


class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DonutImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 204
| 1
|
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)"""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files"""
    selected_warnings = set()
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))
    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
# optional parameters
parser.add_argument(
"--targets",
default="DeprecationWarning,UserWarning,FutureWarning",
type=list_str,
help="Comma-separated list of target warning(s) which we want to extract.",
)
parser.add_argument(
"--from_gh",
action="store_true",
help="If running from a GitHub action workflow and collecting warnings from its artifacts.",
)
    args = parser.parse_args()

    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
print("=" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
| 364
|
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
__A : Union[str, Any] = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
__A : Tuple = importlib.util.spec_from_file_location(
"transformers",
os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
__A : List[str] = spec.loader.load_module()
__A : Any = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
__A : Optional[int] = re.compile("\[(.+?)\]\((https://huggingface\.co/.+?)\)")
__A : List[str] = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def lowercase ( ):
'''simple docstring'''
_UpperCAmelCase = []
for config_class in list(CONFIG_MAPPING.values() ):
_UpperCAmelCase = False
# source code of `config_class`
_UpperCAmelCase = inspect.getsource(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = _re_checkpoint.findall(_SCREAMING_SNAKE_CASE )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
_UpperCAmelCase , _UpperCAmelCase = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
_UpperCAmelCase = f'https://huggingface.co/{ckpt_name}'
if ckpt_link == ckpt_link_from_name:
_UpperCAmelCase = True
break
_UpperCAmelCase = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
_UpperCAmelCase = '''\n'''.join(sorted(_SCREAMING_SNAKE_CASE ) )
raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 326
| 0
|
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """
    Returns the least value of n for which the fill-count function first exceeds
    one million, given the minimum block length.
    """
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n
if __name__ == "__main__":
print(f'''{solution() = }''')
| 5
|
# fmt: off
MORSE_CODE_DICT = {
'''A''': '''.-''', '''B''': '''-...''', '''C''': '''-.-.''', '''D''': '''-..''', '''E''': '''.''', '''F''': '''..-.''', '''G''': '''--.''',
'''H''': '''....''', '''I''': '''..''', '''J''': '''.---''', '''K''': '''-.-''', '''L''': '''.-..''', '''M''': '''--''', '''N''': '''-.''',
'''O''': '''---''', '''P''': '''.--.''', '''Q''': '''--.-''', '''R''': '''.-.''', '''S''': '''...''', '''T''': '''-''', '''U''': '''..-''',
'''V''': '''...-''', '''W''': '''.--''', '''X''': '''-..-''', '''Y''': '''-.--''', '''Z''': '''--..''', '''1''': '''.----''',
'''2''': '''..---''', '''3''': '''...--''', '''4''': '''....-''', '''5''': '''.....''', '''6''': '''-....''', '''7''': '''--...''',
'''8''': '''---..''', '''9''': '''----.''', '''0''': '''-----''', '''&''': '''.-...''', '''@''': '''.--.-.''',
''':''': '''---...''', ''',''': '''--..--''', '''.''': '''.-.-.-''', '''\'''': '''.----.''', '''"''': '''.-..-.''',
'''?''': '''..--..''', '''/''': '''-..-.''', '''=''': '''-...-''', '''+''': '''.-.-.''', '''-''': '''-....-''',
'''(''': '''-.--.''', ''')''': '''-.--.-''', '''!''': '''-.-.--''', ''' ''': '''/'''
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Converts a plain-text message into Morse code."""
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Converts a Morse-code message back into plain text."""
    return "".join(REVERSE_DICT[char] for char in message.split())
def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)
if __name__ == "__main__":
main()
| 5
| 1
|
def different_signs(num1: int, num2: int) -> bool:
    """Returns True if the two integers have opposite signs: the XOR of two ints
    is negative exactly when their sign bits differ."""
    return num1 ^ num2 < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 121
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A pure-Python vector with basic linear-algebra operations."""

    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    """Returns a zero vector of the given dimension."""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Returns a unit basis vector with a 1 at index `pos` and 0 elsewhere."""
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Computes the linear combination `x * scalar + y`."""
    assert isinstance(x, Vector) and isinstance(y, Vector) and (isinstance(scalar, (int, float)))
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    """Returns a vector of dimension `n` with random integer components in [a, b]."""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
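# A short usage sketch of the vector helpers above (illustrative values):
#
#   >>> x = Vector([1, 2, 3])
#   >>> y = zero_vector(3)
#   >>> str(axpy(2, x, y))   # 2 * x + y
#   '(2,4,6)'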
class snake_case__:
"""simple docstring"""
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : list[list[float]] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
lowercase__ : List[str] = matrix
lowercase__ : int = w
lowercase__ : str = h
def __str__( self : int ):
lowercase__ : int = ""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : str , SCREAMING_SNAKE_CASE : Matrix ):
if self.__width == other.width() and self.__height == other.height():
lowercase__ : Union[str, Any] = []
for i in range(self.__height ):
lowercase__ : int = [
self.__matrix[i][j] + other.component(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for j in range(self.__width )
]
matrix.append(SCREAMING_SNAKE_CASE )
return Matrix(SCREAMING_SNAKE_CASE , self.__width , self.__height )
else:
raise Exception("matrix must have the same dimension!" )
def __sub__( self : Dict , SCREAMING_SNAKE_CASE : Matrix ):
if self.__width == other.width() and self.__height == other.height():
lowercase__ : Dict = []
for i in range(self.__height ):
lowercase__ : Optional[Any] = [
self.__matrix[i][j] - other.component(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
for j in range(self.__width )
]
matrix.append(SCREAMING_SNAKE_CASE )
return Matrix(SCREAMING_SNAKE_CASE , self.__width , self.__height )
else:
raise Exception("matrices must have the same dimension!" )
@overload
def __mul__( self : Any , SCREAMING_SNAKE_CASE : float ):
...
@overload
def __mul__( self : Tuple , SCREAMING_SNAKE_CASE : Vector ):
...
def __mul__( self : Optional[Any] , SCREAMING_SNAKE_CASE : float | Vector ):
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): # matrix-vector
if len(SCREAMING_SNAKE_CASE ) == self.__width:
lowercase__ : Any = zero_vector(self.__height )
for i in range(self.__height ):
lowercase__ : Union[str, Any] = [
self.__matrix[i][j] * other.component(SCREAMING_SNAKE_CASE )
for j in range(self.__width )
]
ans.change_component(SCREAMING_SNAKE_CASE , sum(SCREAMING_SNAKE_CASE ) )
return ans
else:
raise Exception(
"vector must have the same size as the "
"number of columns of the matrix!" )
elif isinstance(SCREAMING_SNAKE_CASE , (int, float) ): # matrix-scalar
lowercase__ : str = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(SCREAMING_SNAKE_CASE , self.__width , self.__height )
return None
def snake_case ( self : Union[str, Any] ):
return self.__height
def snake_case ( self : Optional[Any] ):
return self.__width
def snake_case ( self : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("change_component: indices out of bounds" )
def snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : float ):
if 0 <= x < self.__height and 0 <= y < self.__width:
lowercase__ : Union[str, Any] = value
else:
raise Exception("change_component: indices out of bounds" )
def snake_case ( self : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
if self.__height != self.__width:
raise Exception("Matrix is not square" )
lowercase__ : str = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(SCREAMING_SNAKE_CASE ) ):
lowercase__ : Optional[int] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(SCREAMING_SNAKE_CASE , self.__width - 1 , self.__height - 1 ).determinant()
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
if self.__height != self.__width:
raise Exception("Matrix is not square" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
else:
raise Exception("Indices out of bounds" )
def snake_case ( self : List[str] ):
if self.__height != self.__width:
raise Exception("Matrix is not square" )
if self.__height < 1:
raise Exception("Matrix has no element" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
lowercase__ : Dict = [
self.__matrix[0][y] * self.cofactor(0 , SCREAMING_SNAKE_CASE ) for y in range(self.__width )
]
return sum(SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
lowercase__ : list[list[float]] = [[0] * n for _ in range(lowerCamelCase__ )]
return Matrix(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def __lowerCamelCase ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
"""simple docstring"""
random.seed(lowerCamelCase__ )
lowercase__ : list[list[float]] = [
[random.randint(lowerCamelCase__ , lowerCamelCase__ ) for _ in range(lowerCamelCase__ )] for _ in range(lowerCamelCase__ )
]
return Matrix(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
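
# Independent spot-check of the cofactor expansion that the determinant
# method above implements: a minimal sketch on plain nested lists, kept
# separate because the identifiers in this file are mangled.
def _det_sketch(m):
    if len(m) == 1:
        return m[0][0]
    total = 0.0
    for y in range(len(m)):
        # Minor: drop row 0 and column y, then recurse; (-1)**y is the cofactor sign.
        sub = [row[:y] + row[y + 1 :] for row in m[1:]]
        total += (-1) ** y * m[0][y] * _det_sketch(sub)
    return total

assert _det_sketch([[1, 2], [3, 4]]) == -2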
| 121
| 1
|
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"""huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""",
}
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = 'autoformer'
__lowerCamelCase = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , lowercase = None , lowercase = None , lowercase = "student_t" , lowercase = "nll" , lowercase = 1 , lowercase = [1, 2, 3, 4, 5, 6, 7] , lowercase = True , lowercase = 0 , lowercase = 0 , lowercase = 0 , lowercase = 0 , lowercase = None , lowercase = None , lowercase = 64 , lowercase = 2 , lowercase = 2 , lowercase = 2 , lowercase = 2 , lowercase = 32 , lowercase = 32 , lowercase = "gelu" , lowercase = 0.1 , lowercase = 0.1 , lowercase = 0.1 , lowercase = 0.1 , lowercase = 0.1 , lowercase = 100 , lowercase = 0.02 , lowercase = True , lowercase=True , lowercase = 10 , lowercase = 25 , lowercase = 3 , **lowercase , ) -> Optional[Any]:
'''simple docstring'''
A__ = prediction_length
A__ = context_length if context_length is not None else prediction_length
A__ = distribution_output
A__ = loss
A__ = input_size
A__ = num_time_features
A__ = lags_sequence
A__ = scaling
A__ = num_dynamic_real_features
A__ = num_static_real_features
A__ = num_static_categorical_features
if cardinality is not None and num_static_categorical_features > 0:
if len(lowercase ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
A__ = cardinality
else:
A__ = [0]
if embedding_dimension is not None and num_static_categorical_features > 0:
if len(lowercase ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
A__ = embedding_dimension
else:
A__ = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
A__ = num_parallel_samples
# Transformer architecture configuration
A__ = input_size * len(self.lags_sequence ) + self._number_of_features
A__ = d_model
A__ = encoder_attention_heads
A__ = decoder_attention_heads
A__ = encoder_ffn_dim
A__ = decoder_ffn_dim
A__ = encoder_layers
A__ = decoder_layers
A__ = dropout
A__ = attention_dropout
A__ = activation_dropout
A__ = encoder_layerdrop
A__ = decoder_layerdrop
A__ = activation_function
A__ = init_std
A__ = use_cache
# Autoformer
A__ = label_length
A__ = moving_average
A__ = autocorrelation_factor
super().__init__(is_encoder_decoder=lowercase , **lowercase )
@property
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
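
# Back-of-the-envelope check of the `feature_size` set in __init__ above,
# assuming the stock defaults (input_size=1, seven lags, and no time,
# static, or dynamic features): only the loc/scale pair contributes beyond
# the lagged values, so the model input width is 1 * 7 + 2 = 9.
_input_size, _lags = 1, [1, 2, 3, 4, 5, 6, 7]
_extra = 0 + 0 + 0 + 0 + _input_size * 2  # embeddings + dynamic + time + static + loc/scale
assert _input_size * len(_lags) + _extra == 9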
| 68
|
"""simple docstring"""
__UpperCamelCase = frozenset(
[
'''prompt''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
__UpperCamelCase = frozenset(['''prompt''', '''negative_prompt'''])
__UpperCamelCase = frozenset([])
__UpperCamelCase = frozenset(['''image'''])
__UpperCamelCase = frozenset(
[
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
__UpperCamelCase = frozenset(['''image'''])
__UpperCamelCase = frozenset(
[
'''prompt''',
'''image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
__UpperCamelCase = frozenset(['''prompt''', '''image''', '''negative_prompt'''])
__UpperCamelCase = frozenset(
[
# Text guided image variation with an image mask
'''prompt''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
]
)
__UpperCamelCase = frozenset(['''prompt''', '''image''', '''mask_image''', '''negative_prompt'''])
__UpperCamelCase = frozenset(
[
# image variation with an image mask
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
__UpperCamelCase = frozenset(['''image''', '''mask_image'''])
__UpperCamelCase = frozenset(
[
'''example_image''',
'''image''',
'''mask_image''',
'''height''',
'''width''',
'''guidance_scale''',
]
)
__UpperCamelCase = frozenset(['''example_image''', '''image''', '''mask_image'''])
__UpperCamelCase = frozenset(['''class_labels'''])
__UpperCamelCase = frozenset(['''class_labels'''])
__UpperCamelCase = frozenset(['''batch_size'''])
__UpperCamelCase = frozenset([])
__UpperCamelCase = frozenset(['''batch_size'''])
__UpperCamelCase = frozenset([])
__UpperCamelCase = frozenset(
[
'''prompt''',
'''audio_length_in_s''',
'''guidance_scale''',
'''negative_prompt''',
'''prompt_embeds''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
]
)
__UpperCamelCase = frozenset(['''prompt''', '''negative_prompt'''])
__UpperCamelCase = frozenset(['''input_tokens'''])
__UpperCamelCase = frozenset(['''input_tokens'''])
| 113
| 0
|
'''simple docstring'''
import sys
from collections import defaultdict
class _lowercase :
def __init__( self: Tuple ):
lowerCamelCase__ : int = []
def lowerCamelCase_ ( self: Dict , UpperCamelCase__: int ):
return self.node_position[vertex]
def lowerCamelCase_ ( self: Any , UpperCamelCase__: Any , UpperCamelCase__: int ):
lowerCamelCase__ : Optional[Any] = pos
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: int , UpperCamelCase__: Optional[int] , UpperCamelCase__: Optional[Any] , UpperCamelCase__: List[str] ):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
lowerCamelCase__ : Union[str, Any] = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
lowerCamelCase__ : Optional[Any] = 2 * start + 1
else:
lowerCamelCase__ : Tuple = 2 * start + 2
if heap[smallest_child] < heap[start]:
lowerCamelCase__ , lowerCamelCase__ : int = heap[smallest_child], positions[smallest_child]
lowerCamelCase__ , lowerCamelCase__ : int = (
heap[start],
positions[start],
)
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = temp, tempa
lowerCamelCase__ : Tuple = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , UpperCamelCase__ )
self.top_to_bottom(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ ( self: Dict , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: int , UpperCamelCase__: Tuple , UpperCamelCase__: int ):
lowerCamelCase__ : Any = position[index]
while index != 0:
lowerCamelCase__ : Tuple = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
lowerCamelCase__ : str = heap[parent]
lowerCamelCase__ : List[Any] = position[parent]
self.set_position(position[parent] , UpperCamelCase__ )
else:
lowerCamelCase__ : int = val
lowerCamelCase__ : Tuple = temp
self.set_position(UpperCamelCase__ , UpperCamelCase__ )
break
lowerCamelCase__ : Optional[Any] = parent
else:
lowerCamelCase__ : Union[str, Any] = val
lowerCamelCase__ : Any = temp
self.set_position(UpperCamelCase__ , 0 )
def lowerCamelCase_ ( self: Any , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Any ):
lowerCamelCase__ : Tuple = len(UpperCamelCase__ ) // 2 - 1
for i in range(UpperCamelCase__ , -1 , -1 ):
self.top_to_bottom(UpperCamelCase__ , UpperCamelCase__ , len(UpperCamelCase__ ) , UpperCamelCase__ )
def lowerCamelCase_ ( self: str , UpperCamelCase__: str , UpperCamelCase__: Any ):
lowerCamelCase__ : Union[str, Any] = positions[0]
lowerCamelCase__ : Any = sys.maxsize
self.top_to_bottom(UpperCamelCase__ , 0 , len(UpperCamelCase__ ) , UpperCamelCase__ )
return temp
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> str:
lowerCamelCase__ : Any = Heap()
lowerCamelCase__ : Any = [0] * len(UpperCamelCase )
lowerCamelCase__ : int = [-1] * len(UpperCamelCase ) # Neighboring Tree Vertex of selected vertex
    # Minimum distance from each explored vertex to the partial tree
    # formed so far in the graph
lowerCamelCase__ : Tuple = [] # Heap of Distance of vertices from their neighboring vertex
lowerCamelCase__ : Optional[int] = []
for vertex in range(len(UpperCamelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCamelCase )
heap.node_position.append(UpperCamelCase )
lowerCamelCase__ : int = []
lowerCamelCase__ : Dict = 1
lowerCamelCase__ : Dict = sys.maxsize
for neighbor, distance in adjacency_list[0]:
lowerCamelCase__ : Optional[Any] = 0
lowerCamelCase__ : Any = distance
heap.heapify(UpperCamelCase , UpperCamelCase )
for _ in range(1 , len(UpperCamelCase ) ):
lowerCamelCase__ : Optional[int] = heap.delete_minimum(UpperCamelCase , UpperCamelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
lowerCamelCase__ : List[Any] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase )]
):
lowerCamelCase__ : Tuple = distance
heap.bottom_to_top(
UpperCamelCase , heap.get_position(UpperCamelCase ) , UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : Tuple = vertex
return tree_edges
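
# Independent cross-check of Prim's algorithm using the standard-library
# heapq instead of the hand-rolled heap above (illustrative only; the tiny
# graph below is made up and uses the same [neighbor, weight] convention).
import heapq

def _prim_sketch(adjacency, start=0):
    visited, edges = {start}, []
    heap = [(weight, start, neighbor) for neighbor, weight in adjacency[start]]
    heapq.heapify(heap)
    while heap:
        weight, u, v = heapq.heappop(heap)
        if v in visited:
            continue
        visited.add(v)
        edges.append((u, v))
        for neighbor, other_weight in adjacency[v]:
            if neighbor not in visited:
                heapq.heappush(heap, (other_weight, v, neighbor))
    return edges

assert _prim_sketch({0: [[1, 1], [2, 4]], 1: [[0, 1], [2, 2]], 2: [[0, 4], [1, 2]]}) == [(0, 1), (1, 2)]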
if __name__ == "__main__": # pragma: no cover
    # < --------- Prim's Algorithm --------- >
_A : Tuple =int(input('''Enter number of edges: ''').strip())
_A : Union[str, Any] =defaultdict(list)
for _ in range(edges_number):
_A : Any =[int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 129
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_A : Union[str, Any] =8
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=BITS ) -> Tuple:
lowerCamelCase__ : List[str] = x.device
lowerCamelCase__ : Any = (x * 255).int().clamp(0 , 255 )
lowerCamelCase__ : Optional[int] = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCamelCase )
lowerCamelCase__ : int = rearrange(UpperCamelCase , """d -> d 1 1""" )
lowerCamelCase__ : List[str] = rearrange(UpperCamelCase , """b c h w -> b c 1 h w""" )
lowerCamelCase__ : Tuple = ((x & mask) != 0).float()
lowerCamelCase__ : List[Any] = rearrange(UpperCamelCase , """b c d h w -> b (c d) h w""" )
lowerCamelCase__ : Optional[int] = bits * 2 - 1
return bits
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase=BITS ) -> List[Any]:
lowerCamelCase__ : List[Any] = x.device
lowerCamelCase__ : Dict = (x > 0).int()
lowerCamelCase__ : Optional[Any] = 2 ** torch.arange(bits - 1 , -1 , -1 , device=UpperCamelCase , dtype=torch.intaa )
lowerCamelCase__ : List[Any] = rearrange(UpperCamelCase , """d -> d 1 1""" )
lowerCamelCase__ : List[str] = rearrange(UpperCamelCase , """b (c d) h w -> b c d h w""" , d=8 )
lowerCamelCase__ : List[Any] = reduce(x * mask , """b c d h w -> b c h w""" , """sum""" )
return (dec / 255).clamp(0.0 , 1.0 )
def SCREAMING_SNAKE_CASE_ (self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = 0.0 , UpperCamelCase = True , UpperCamelCase=None , UpperCamelCase = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
lowerCamelCase__ : Optional[int] = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
lowerCamelCase__ : str = self.alphas_cumprod[timestep]
lowerCamelCase__ : List[str] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
lowerCamelCase__ : Optional[int] = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCamelCase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
lowerCamelCase__ : Dict = self.bit_scale
if self.config.clip_sample:
lowerCamelCase__ : Optional[Any] = torch.clamp(UpperCamelCase , -scale , UpperCamelCase )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
lowerCamelCase__ : Tuple = self._get_variance(UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : Optional[int] = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
lowerCamelCase__ : int = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCamelCase__ : Optional[Any] = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCamelCase__ : Tuple = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
lowerCamelCase__ : Dict = model_output.device if torch.is_tensor(UpperCamelCase ) else """cpu"""
lowerCamelCase__ : str = torch.randn(model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase ).to(UpperCamelCase )
lowerCamelCase__ : Optional[Any] = self._get_variance(UpperCamelCase , UpperCamelCase ) ** 0.5 * eta * noise
lowerCamelCase__ : int = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=UpperCamelCase , pred_original_sample=UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase="epsilon" , UpperCamelCase=None , UpperCamelCase = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
lowerCamelCase__ : List[Any] = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = torch.split(UpperCamelCase , sample.shape[1] , dim=1 )
else:
lowerCamelCase__ : List[str] = None
# 1. compute alphas, betas
lowerCamelCase__ : str = self.alphas_cumprod[t]
lowerCamelCase__ : List[str] = self.alphas_cumprod[t - 1] if t > 0 else self.one
lowerCamelCase__ : str = 1 - alpha_prod_t
lowerCamelCase__ : List[Any] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
lowerCamelCase__ : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
lowerCamelCase__ : Optional[Any] = model_output
else:
raise ValueError(f'''Unsupported prediction_type {prediction_type}.''' )
# 3. Clip "predicted x_0"
lowerCamelCase__ : str = self.bit_scale
if self.config.clip_sample:
lowerCamelCase__ : List[Any] = torch.clamp(UpperCamelCase , -scale , UpperCamelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase__ : Tuple = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
lowerCamelCase__ : Tuple = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCamelCase__ : int = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
lowerCamelCase__ : Optional[Any] = 0
if t > 0:
lowerCamelCase__ : Optional[Any] = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=UpperCamelCase ).to(model_output.device )
lowerCamelCase__ : str = (self._get_variance(UpperCamelCase , predicted_variance=UpperCamelCase ) ** 0.5) * noise
lowerCamelCase__ : Optional[int] = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=UpperCamelCase , pred_original_sample=UpperCamelCase )
class _lowercase ( _lowercase ):
def __init__( self: List[str] , UpperCamelCase__: UNetaDConditionModel , UpperCamelCase__: Union[DDIMScheduler, DDPMScheduler] , UpperCamelCase__: Optional[float] = 1.0 , ):
super().__init__()
lowerCamelCase__ : Optional[int] = bit_scale
lowerCamelCase__ : List[Any] = (
ddim_bit_scheduler_step if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ )
@torch.no_grad()
def __call__( self: Union[str, Any] , UpperCamelCase__: Optional[int] = 256 , UpperCamelCase__: Optional[int] = 256 , UpperCamelCase__: Optional[int] = 50 , UpperCamelCase__: Optional[torch.Generator] = None , UpperCamelCase__: Optional[int] = 1 , UpperCamelCase__: Optional[str] = "pil" , UpperCamelCase__: bool = True , **UpperCamelCase__: int , ):
lowerCamelCase__ : List[Any] = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=UpperCamelCase__ , )
lowerCamelCase__ : Union[str, Any] = decimal_to_bits(UpperCamelCase__ ) * self.bit_scale
lowerCamelCase__ : Union[str, Any] = latents.to(self.device )
self.scheduler.set_timesteps(UpperCamelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
lowerCamelCase__ : Tuple = self.unet(UpperCamelCase__ , UpperCamelCase__ ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase__ : Any = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
lowerCamelCase__ : Dict = bits_to_decimal(UpperCamelCase__ )
if output_type == "pil":
lowerCamelCase__ : int = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase__ )
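
# Quick standalone round-trip check of the +/-1 bit encoding implemented by
# decimal_to_bits / bits_to_decimal above, reduced to a single 8-bit value
# (the real functions operate per-channel on image tensors).
_mask = 2 ** torch.arange(7, -1, -1)  # 128, 64, ..., 1
_bits = ((torch.tensor(200) & _mask) != 0).float() * 2 - 1  # bit planes in [-1, 1]
assert int(((_bits > 0).int() * _mask).sum()) == 200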
| 129
| 1
|
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__snake_case : Optional[int] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = ['input_features', 'is_longer']
def __init__( self : str , lowerCAmelCase_ : List[str]=64 , lowerCAmelCase_ : Union[str, Any]=4_80_00 , lowerCAmelCase_ : List[Any]=4_80 , lowerCAmelCase_ : str=10 , lowerCAmelCase_ : str=10_24 , lowerCAmelCase_ : List[str]=0.0 , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : float = 0 , lowerCAmelCase_ : float = 1_40_00 , lowerCAmelCase_ : int = None , lowerCAmelCase_ : str = "fusion" , lowerCAmelCase_ : str = "repeatpad" , **lowerCAmelCase_ : int , ) -> List[str]:
'''simple docstring'''
super().__init__(
feature_size=lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , padding_value=lowerCAmelCase_ , return_attention_mask=lowerCAmelCase_ , **lowerCAmelCase_ , )
A__ : Union[str, Any] =top_db
A__ : Tuple =truncation
A__ : List[str] =padding
A__ : List[Any] =fft_window_size
A__ : Optional[Any] =(fft_window_size >> 1) + 1
A__ : List[str] =hop_length
A__ : List[Any] =max_length_s
A__ : List[str] =max_length_s * sampling_rate
A__ : Optional[Any] =sampling_rate
A__ : int =frequency_min
A__ : Any =frequency_max
A__ : int =mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCAmelCase_ , min_frequency=lowerCAmelCase_ , max_frequency=lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , norm=lowerCAmelCase_ , mel_scale="""htk""" , )
A__ : Tuple =mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=lowerCAmelCase_ , min_frequency=lowerCAmelCase_ , max_frequency=lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , norm="""slaney""" , mel_scale="""slaney""" , )
def lowercase__ ( self : str ) -> Dict[str, Any]:
'''simple docstring'''
A__ : Optional[int] =copy.deepcopy(self.__dict__ )
A__ : Any =self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def lowercase__ ( self : Optional[Any] , lowerCAmelCase_ : np.array , lowerCAmelCase_ : Optional[np.array] = None ) -> np.ndarray:
'''simple docstring'''
A__ : List[str] =spectrogram(
lowerCAmelCase_ , window_function(self.fft_window_size , """hann""" ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=lowerCAmelCase_ , log_mel="""dB""" , )
return log_mel_spectrogram.T
def lowercase__ ( self : List[Any] , lowerCAmelCase_ : str , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : List[str] ) -> Any:
'''simple docstring'''
A__ : Any =np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
A__ : Any =[0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
A__ : List[Any] =[0]
# randomly choose index for each part
A__ : int =np.random.choice(ranges[0] )
A__ : str =np.random.choice(ranges[1] )
A__ : Any =np.random.choice(ranges[2] )
A__ : Any =mel[idx_front : idx_front + chunk_frames, :]
A__ : str =mel[idx_middle : idx_middle + chunk_frames, :]
A__ : Union[str, Any] =mel[idx_back : idx_back + chunk_frames, :]
A__ : Optional[int] =torch.tensor(mel[None, None, :] )
A__ : Optional[int] =torch.nn.functional.interpolate(
lowerCAmelCase_ , size=[chunk_frames, 64] , mode="""bilinear""" , align_corners=lowerCAmelCase_ )
A__ : str =mel_shrink[0][0].numpy()
A__ : int =np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def lowercase__ ( self : Optional[int] , lowerCAmelCase_ : np.array , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any ) -> np.array:
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
A__ : List[Any] =True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
A__ : Optional[Any] =len(lowerCAmelCase_ ) - max_length
A__ : Dict =np.random.randint(0 , overflow + 1 )
A__ : Optional[int] =waveform[idx : idx + max_length]
A__ : Any =self._np_extract_fbank_features(lowerCAmelCase_ , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
A__ : Union[str, Any] =self._np_extract_fbank_features(lowerCAmelCase_ , self.mel_filters )
                A__ : Tuple =max_length // self.hop_length + 1 # the +1 relates to how the spectrogram is computed
A__ : Optional[int] =mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
A__ : List[str] =np.stack([mel, mel, mel, mel] , axis=0 )
A__ : Union[str, Any] =False
else:
A__ : List[str] =self._random_mel_fusion(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
A__ : Tuple =True
else:
raise NotImplementedError(f"data_truncating {truncation} not implemented" )
else:
A__ : str =False
        # Only use "repeat" as a new possible value for padding: the audio is repeated before the usual max_length padding is applied.
if waveform.shape[0] < max_length:
if padding == "repeat":
A__ : Optional[Any] =int(max_length / len(lowerCAmelCase_ ) )
A__ : str =np.stack(np.tile(lowerCAmelCase_ , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
A__ : int =int(max_length / len(lowerCAmelCase_ ) )
A__ : Optional[Any] =np.stack(np.tile(lowerCAmelCase_ , lowerCAmelCase_ ) )
A__ : List[Any] =np.pad(lowerCAmelCase_ , (0, max_length - waveform.shape[0]) , mode="""constant""" , constant_values=0 )
if truncation == "fusion":
A__ : str =self._np_extract_fbank_features(lowerCAmelCase_ , self.mel_filters )
A__ : Tuple =np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
A__ : str =self._np_extract_fbank_features(lowerCAmelCase_ , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : List[Any] , lowerCAmelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCAmelCase_ : str = None , lowerCAmelCase_ : Optional[str] = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , **lowerCAmelCase_ : List[Any] , ) -> BatchFeature:
'''simple docstring'''
A__ : Optional[int] =truncation if truncation is not None else self.truncation
A__ : Optional[Any] =padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
A__ : Tuple =isinstance(lowerCAmelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}" )
A__ : int =is_batched_numpy or (
isinstance(lowerCAmelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
A__ : List[str] =[np.asarray(lowerCAmelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCAmelCase_ , np.ndarray ):
A__ : List[str] =np.asarray(lowerCAmelCase_ , dtype=np.floataa )
elif isinstance(lowerCAmelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A__ : Optional[Any] =raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A__ : Tuple =[np.asarray(lowerCAmelCase_ )]
# convert to mel spectrogram, truncate and pad if needed.
A__ : Union[str, Any] =[
self._get_input_mel(lowerCAmelCase_ , max_length if max_length else self.nb_max_samples , lowerCAmelCase_ , lowerCAmelCase_ )
for waveform in raw_speech
]
A__ : Union[str, Any] =[]
A__ : Tuple =[]
for mel, longer in padded_inputs:
input_mel.append(lowerCAmelCase_ )
is_longer.append(lowerCAmelCase_ )
if truncation == "fusion" and sum(lowerCAmelCase_ ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
A__ : List[str] =np.random.randint(0 , len(lowerCAmelCase_ ) )
A__ : Optional[int] =True
if isinstance(input_mel[0] , lowerCAmelCase_ ):
A__ : int =[np.asarray(lowerCAmelCase_ , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
A__ : int =[[longer] for longer in is_longer]
A__ : str ={"""input_features""": input_mel, """is_longer""": is_longer}
A__ : Union[str, Any] =BatchFeature(lowerCAmelCase_ )
if return_tensors is not None:
A__ : List[str] =input_features.convert_to_tensors(lowerCAmelCase_ )
return input_features
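
# Simplified standalone sketch of the "repeatpad" branch above: a short clip
# is tiled toward the target length, then zero-padded the rest of the way
# (toy numbers; the real extractor operates on 48 kHz waveforms).
_wave, _max_len = np.array([1.0, 2.0, 3.0]), 8
_tiled = np.tile(_wave, _max_len // len(_wave))  # length 6
_padded = np.pad(_tiled, (0, _max_len - _tiled.shape[0]), mode="constant")
assert _padded.tolist() == [1, 2, 3, 1, 2, 3, 0, 0]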
| 134
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__snake_case : Optional[Any] = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : int = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[Any] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : str = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Optional[Any] = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
__snake_case : Any = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 134
| 1
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class a__ ( unittest.TestCase ):
"""simple docstring"""
def _snake_case (self ):
__lowerCAmelCase = 10
def _snake_case (self ):
__lowerCAmelCase = [1, 2, 3, 4]
__lowerCAmelCase = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(__lowercase , self.block_size , 0 ) , __lowercase )
def _snake_case (self ):
__lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
__lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__lowercase , self.block_size , 0 ) , __lowercase )
def _snake_case (self ):
__lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
__lowerCAmelCase = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(__lowercase , self.block_size , 0 ) , __lowercase )
def _snake_case (self ):
__lowerCAmelCase = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
__lowerCAmelCase , __lowerCAmelCase = process_story(__lowercase )
self.assertEqual(__lowercase , [] )
def _snake_case (self ):
__lowerCAmelCase = ''''''
__lowerCAmelCase , __lowerCAmelCase = process_story(__lowercase )
self.assertEqual(__lowercase , [] )
self.assertEqual(__lowercase , [] )
def _snake_case (self ):
__lowerCAmelCase = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
__lowerCAmelCase , __lowerCAmelCase = process_story(__lowercase )
__lowerCAmelCase = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(__lowercase , __lowercase )
__lowerCAmelCase = ['''It was the best of times.''']
self.assertEqual(__lowercase , __lowercase )
def _snake_case (self ):
__lowerCAmelCase = torch.tensor([1, 2, 3, 4] )
__lowerCAmelCase = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(__lowercase , 0 ).numpy() , expected.numpy() )
def _snake_case (self ):
__lowerCAmelCase = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
__lowerCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__lowercase , 23 ).numpy() , expected.numpy() )
def _snake_case (self ):
__lowerCAmelCase = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
__lowerCAmelCase = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__lowercase , 1 ).numpy() , expected.numpy() )
def _snake_case (self ):
__lowerCAmelCase = 1_01
__lowerCAmelCase = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] )
__lowerCAmelCase = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
__lowerCAmelCase = compute_token_type_ids(__lowercase , __lowercase )
np.testing.assert_array_equal(__lowercase , __lowercase )
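
# Illustrative reference implementation matching the truncate_or_pad
# behaviour the tests above encode (the real helper lives in
# utils_summarization; this restatement is derived from the test cases only).
def _truncate_or_pad_sketch(sequence, block_size, pad_token):
    return (list(sequence) + [pad_token] * block_size)[:block_size]

assert _truncate_or_pad_sketch([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert _truncate_or_pad_sketch(range(1, 14), 10, 0) == list(range(1, 11))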
| 364
|
'''simple docstring'''
import numpy as np
def __magic_name__( lowerCamelCase, lowerCamelCase, lowerCamelCase = 1E-12, lowerCamelCase = 1_0_0, ):
assert np.shape(lowerCamelCase)[0] == np.shape(lowerCamelCase)[1]
# Ensure proper dimensionality.
assert np.shape(lowerCamelCase)[0] == np.shape(lowerCamelCase)[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(lowerCamelCase) == np.iscomplexobj(lowerCamelCase)
__lowerCAmelCase = np.iscomplexobj(lowerCamelCase)
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(lowerCamelCase, input_matrix.conj().T)
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
__lowerCAmelCase = False
__lowerCAmelCase = 0
__lowerCAmelCase = 0
__lowerCAmelCase = 1E12
while not convergence:
# Multiple matrix by the vector.
__lowerCAmelCase = np.dot(lowerCamelCase, lowerCamelCase)
# Normalize the resulting output vector.
__lowerCAmelCase = w / np.linalg.norm(lowerCamelCase)
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
__lowerCAmelCase = vector.conj().T if is_complex else vector.T
__lowerCAmelCase = np.dot(lowerCamelCase, np.dot(lowerCamelCase, lowerCamelCase))
# Check convergence.
__lowerCAmelCase = np.abs(lambda_ - lambda_previous) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
__lowerCAmelCase = True
__lowerCAmelCase = lambda_
if is_complex:
__lowerCAmelCase = np.real(lambda_)
return lambda_, vector
def __magic_name__( ):
__lowerCAmelCase = np.array([[4_1, 4, 2_0], [4, 2_6, 3_0], [2_0, 3_0, 5_0]])
__lowerCAmelCase = np.array([4_1, 4, 2_0])
__lowerCAmelCase = real_input_matrix.astype(np.complexaaa)
__lowerCAmelCase = np.triu(1J * complex_input_matrix, 1)
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
__lowerCAmelCase = np.array([4_1, 4, 2_0]).astype(np.complexaaa)
for problem_type in ["real", "complex"]:
if problem_type == "real":
__lowerCAmelCase = real_input_matrix
__lowerCAmelCase = real_vector
elif problem_type == "complex":
__lowerCAmelCase = complex_input_matrix
__lowerCAmelCase = complex_vector
# Our implementation.
__lowerCAmelCase , __lowerCAmelCase = power_iteration(lowerCamelCase, lowerCamelCase)
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh is used for symmetric or Hermitian matrices).
__lowerCAmelCase , __lowerCAmelCase = np.linalg.eigh(lowerCamelCase)
# Last eigenvalue is the maximum one.
__lowerCAmelCase = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
__lowerCAmelCase = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max) <= 1E-6
        # Take absolute values element-wise of each eigenvector,
        # as eigenvectors are unique only up to sign.
assert np.linalg.norm(np.abs(lowerCamelCase) - np.abs(lowerCamelCase)) <= 1E-6
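
# Tiny worked example of the Rayleigh-quotient update used above, on a
# diagonal 2x2 matrix whose dominant eigenvalue (2) is known in closed form
# (independent sketch, not part of the test above).
_A = np.array([[2.0, 0.0], [0.0, 1.0]])
_v = np.array([1.0, 1.0]) / np.sqrt(2)
for _ in range(50):
    _v = _A @ _v
    _v = _v / np.linalg.norm(_v)
assert abs(_v @ _A @ _v - 2.0) < 1E-6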
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 9
| 0
|
'''simple docstring'''
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
a__ : Tuple = {'UserAgent': UserAgent().random}
def _UpperCamelCase ( __A ) -> dict:
'''simple docstring'''
UpperCamelCase__ = script.contents[0]
UpperCamelCase__ = json.loads(data[data.find("{\"config\"" ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class lowercase_ :
def __init__( self , a ):
UpperCamelCase__ = f'''https://www.instagram.com/{username}/'''
UpperCamelCase__ = self.get_json()
def __a ( self ):
UpperCamelCase__ = requests.get(self.url , headers=a ).text
UpperCamelCase__ = BeautifulSoup(a , "html.parser" ).find_all("script" )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self ):
return f'''{self.__class__.__name__}(\'{self.username}\')'''
def __str__( self ):
return f'''{self.fullname} ({self.username}) is {self.biography}'''
@property
def __a ( self ):
return self.user_data["username"]
@property
def __a ( self ):
return self.user_data["full_name"]
@property
def __a ( self ):
return self.user_data["biography"]
@property
def __a ( self ):
return self.user_data["business_email"]
@property
def __a ( self ):
return self.user_data["external_url"]
@property
def __a ( self ):
return self.user_data["edge_followed_by"]["count"]
@property
def __a ( self ):
return self.user_data["edge_follow"]["count"]
@property
def __a ( self ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def __a ( self ):
return self.user_data["profile_pic_url_hd"]
@property
def __a ( self ):
return self.user_data["is_verified"]
@property
def __a ( self ):
return self.user_data["is_private"]
def _UpperCamelCase ( __A = "github" ) -> None:
'''simple docstring'''
import os
if os.environ.get("CI" ):
return # test failing on GitHub Actions
UpperCamelCase__ = InstagramUser(__A )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , __A )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram." )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ : Any = InstagramUser('github')
print(instagram_user)
print(F"""{instagram_user.number_of_posts = }""")
print(F"""{instagram_user.number_of_followers = }""")
print(F"""{instagram_user.number_of_followings = }""")
print(F"""{instagram_user.email = }""")
print(F"""{instagram_user.website = }""")
print(F"""{instagram_user.profile_picture_url = }""")
print(F"""{instagram_user.is_verified = }""")
print(F"""{instagram_user.is_private = }""")
| 80
|
def lowerCAmelCase__( lowercase : int = 100_0000 ) -> int:
__snake_case : List[Any] = limit + 1
__snake_case : List[str] = [0] * limit
for first_term in range(1 , lowercase ):
for n in range(lowercase , lowercase , lowercase ):
__snake_case : Union[str, Any] = first_term + n / first_term
            if common_difference % 4: # a + n/a (= 4d) must be divisible by 4 for d to be an integer
continue
else:
common_difference /= 4
if (
first_term > common_difference
and first_term < 4 * common_difference
): # since x,y,z are positive integers
                frequency[n] += 1 # a > d gives z > 0, and a < 4d gives n > 0
__snake_case : Tuple = sum(1 for x in frequency[1:limit] if x == 10 )
return count
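
# Spot-check of the identity behind the search above: for x, y, z in
# arithmetic progression (x = a + d, y = a, z = a - d) we get
# x**2 - y**2 - z**2 = a * (4 * d - a), which is why the loop solves
# a + n / a = 4 * d. Illustrative numbers below.
_a, _d = 10, 4
assert (_a + _d) ** 2 - _a**2 - (_a - _d) ** 2 == _a * (4 * _d - _a) == 60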
if __name__ == "__main__":
print(F'''{solution() = }''')
| 326
| 0
|
"""simple docstring"""
from __future__ import annotations
from math import pi, sqrt
def lowerCamelCase ( _UpperCamelCase : float , _UpperCamelCase : float ) -> tuple:
'''simple docstring'''
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
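
# Worked example of the formula above: a 1 mH inductor with a 1 uF capacitor
# resonates near 5.03 kHz, since f = 1 / (2 * pi * sqrt(L * C)).
# Standalone recomputation with illustrative component values.
assert abs(1 / (2 * pi * sqrt(1E-3 * 1E-6)) - 5032.9) < 0.1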
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350
|
"""simple docstring"""
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCamelCase__ ( A ):
"""simple docstring"""
__a = ["""image_processor""", """tokenizer"""]
__a = """AutoImageProcessor"""
__a = """AutoTokenizer"""
def __init__( self : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ):
'''simple docstring'''
super().__init__(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : str = self.image_processor
def __call__( self : Dict , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : int=None , **UpperCamelCase : Optional[int] ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
__UpperCAmelCase : List[str] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if images is not None:
__UpperCAmelCase : Optional[Any] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
if text is not None and images is not None:
__UpperCAmelCase : str = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase )
def lowerCamelCase__ ( self : List[str] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ):
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase )
def lowerCamelCase__ ( self : int , *UpperCamelCase : str , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase )
@property
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
return ["input_ids", "attention_mask", "pixel_values"]
| 320
| 0
|
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('''socket.socket''' )
@patch('''builtins.open''' )
def lowerCamelCase__ ( a , a ) -> Tuple:
# ===== initialization =====
_A: Union[str, Any] = Mock()
_A: List[Any] = conn, Mock()
_A: Optional[Any] = iter([1, None] )
_A: str = lambda a : next(a )
# ===== invoke =====
send_file(filename='''mytext.txt''' , testing=a )
    # ===== verification =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 121
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
UpperCAmelCase__ : Any = logging.get_logger(__name__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__UpperCamelCase : List[Any] = ['''pixel_values''']
def __init__( self : Optional[int] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_5_5 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : int = 8 , **lowerCAmelCase_ : Tuple , ):
"""simple docstring"""
super().__init__(**lowerCAmelCase_ )
_A: List[str] = do_rescale
_A: Any = rescale_factor
_A: List[Any] = do_pad
_A: Tuple = pad_size
def __magic_name__ ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : float , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Any ):
"""simple docstring"""
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def __magic_name__ ( self : Tuple , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : int , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None ):
"""simple docstring"""
_A , _A: Optional[int] = get_image_size(lowerCAmelCase_ )
_A: Union[str, Any] = (old_height // size + 1) * size - old_height
_A: Optional[Any] = (old_width // size + 1) * size - old_width
return pad(lowerCAmelCase_ , ((0, pad_height), (0, pad_width)) , mode='''symmetric''' , data_format=lowerCAmelCase_ )
def __magic_name__ ( self : str , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[Union[str, TensorType]] = None , lowerCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **lowerCAmelCase_ : int , ):
"""simple docstring"""
_A: List[str] = do_rescale if do_rescale is not None else self.do_rescale
_A: int = rescale_factor if rescale_factor is not None else self.rescale_factor
_A: str = do_pad if do_pad is not None else self.do_pad
_A: Union[str, Any] = pad_size if pad_size is not None else self.pad_size
_A: List[Any] = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
_A: Union[str, Any] = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_rescale:
_A: str = [self.rescale(image=lowerCAmelCase_ , scale=lowerCAmelCase_ ) for image in images]
if do_pad:
_A: str = [self.pad(lowerCAmelCase_ , size=lowerCAmelCase_ ) for image in images]
_A: Optional[Any] = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
_A: List[Any] = {'''pixel_values''': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
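
# The symmetric padding above rounds each spatial dimension up to the *next*
# multiple of `size`; note that an exact multiple still gains a full extra
# block, because the formula always adds one. Quick standalone check:
def _pad_amount_sketch(old: int, size: int = 8) -> int:
    return (old // size + 1) * size - old

assert _pad_amount_sketch(20) == 4  # 20 -> 24
assert _pad_amount_sketch(24) == 8  # already a multiple, still padded to 32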
| 121
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = [
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def UpperCamelCase ( UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
a_ = k.replace(UpperCAmelCase , UpperCAmelCase )
if k.startswith("encoder" ):
a_ = k.replace(".attn" , ".self_attn" )
a_ = k.replace("norm1" , "self_attn_layer_norm" )
a_ = k.replace("norm2" , "final_layer_norm" )
elif k.startswith("decoder" ):
a_ = k.replace("norm1" , "self_attn_layer_norm" )
a_ = k.replace("norm2" , "encoder_attn_layer_norm" )
a_ = k.replace("norm3" , "final_layer_norm" )
return k
def UpperCamelCase ( UpperCAmelCase ) ->Tuple:
"""simple docstring"""
a_ = [
"model.encoder.layernorm_embedding.weight",
"model.encoder.layernorm_embedding.bias",
"model.decoder.layernorm_embedding.weight",
"model.decoder.layernorm_embedding.bias",
]
for k in keys:
a_ = sd.pop(UpperCAmelCase )
a_ = k.replace("layernorm_embedding" , "layer_norm" )
assert new_k not in sd
a_ = v
UpperCamelCase_ = ['START']
@torch.no_grad()
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
a_ = torch.load(UpperCAmelCase , map_location="cpu" )
a_ = model["model"]
a_ = BlenderbotConfig.from_json_file(UpperCAmelCase )
a_ = BlenderbotForConditionalGeneration(UpperCAmelCase )
a_ = m.model.state_dict().keys()
a_ = []
a_ = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
a_ = rename_state_dict_key(UpperCAmelCase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
a_ = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(UpperCAmelCase )
m.model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase )
m.half()
m.save_pretrained(UpperCAmelCase )
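
# Small dry run of the PATTERNS substitution performed inside
# rename_state_dict_key above, on a representative (made-up) ParlAI key.
# The full function additionally rewrites norm names and promotes ".attn"
# to ".self_attn" for encoder keys; only the pattern loop is shown here.
_key = "encoder.layers.0.attention.q_lin.weight"
for _parlai_name, _hf_name in [["attention", "attn"], ["q_lin", "q_proj"]]:
    _key = _key.replace(_parlai_name, _hf_name)
assert _key == "encoder.layers.0.attn.q_proj.weight"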
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
UpperCamelCase_ = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 303
|
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal: x0 <= x1 and y0 <= y1
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
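# Sketch of a standalone LiLT forward pass mirroring the integration test above;
# the token ids and (x0, y0, x1, y1) boxes are arbitrary illustrative values.
def _lilt_forward_example():
    model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
    input_ids = torch.tensor([[1, 2]])
    bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])  # one box per token
    with torch.no_grad():
        hidden = model(input_ids=input_ids, bbox=bbox).last_hidden_state
    print(hidden.shape)  # torch.Size([1, 2, 768])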
| 303
| 1
|
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """BPE tokenizer for BlenderbotSmall, built from a vocab file and a merges file."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)

    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
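# Quick illustration of get_pairs on a BPE-style symbol tuple; the word is a
# made-up example, with "</w>" marking the end-of-word symbol as in bpe() above.
def _get_pairs_example():
    word = ("h", "e", "l", "l", "o</w>")
    print(get_pairs(word))  # {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')}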
| 129
|
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)

enable_full_determinism()


class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()

        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int64, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
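# Minimal sketch instantiating the small configuration from Unet2DModelTests
# above and running a single denoising step; shapes follow its dummy_input.
def _unet_forward_example():
    model = UNet2DModel(
        block_out_channels=(32, 64),
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        attention_head_dim=3,
        in_channels=3,
        out_channels=3,
        layers_per_block=2,
        sample_size=32,
    )
    sample = torch.randn(4, 3, 32, 32)
    out = model(sample, timestep=torch.tensor([10])).sample
    print(out.shape)  # torch.Size([4, 3, 32, 32])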
| 129
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PIL.Image.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PIL.Image.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
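# Hedged end-to-end sketch of the processor above on a random PIL image; the
# default 256 -> 224 resize/crop pipeline should yield (1, 3, 224, 224).
def _image_processor_example():
    image = PIL.Image.fromarray(np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8))
    processor = ImageProcessor()
    batch = processor(images=image, return_tensors="np")
    print(batch["pixel_values"].shape)  # (1, 3, 224, 224)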
| 332
|
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Build a state-space tree and iterate through each branch using DFS,
    printing one permutation per leaf."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
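# The DFS visits n! leaves, so the number of printed permutations equals
# factorial(len(sequence)); a quick cross-check against the standard library:
from itertools import permutations

assert len(list(permutations([3, 1, 2, 4]))) == 24  # 4! = 24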
| 332
| 1
|
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available

from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin

if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor


class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}


def get_html_strings():
    html_string_1 = """<HTML>

    <HEAD>
    <TITLE>sample document</TITLE>
    </HEAD>

    <BODY BGCOLOR="FFFFFF">
    <HR>
    <a href="http://google.com">Goog</a>
    <H1>This is one header</H1>
    <H2>This is a another Header</H2>
    <P>Travel from
    <P>
    <B>SFO to JFK</B>
    <BR>
    <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
    <HR>
    <div style="color:#0000FF">
    <h3>Traveler <b> name </b> is
    <p> John Doe </p>
    </div>"""

    html_string_2 = """
    <!DOCTYPE html>
    <html>
    <body>

    <h1>My First Heading</h1>
    <p>My first paragraph.</p>

    </body>
    </html>
    """

    return [html_string_1, html_string_2]


@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
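# Minimal usage sketch of the extractor outside the test harness; the tiny page
# mirrors the second HTML string above.
def _markuplm_extractor_example():
    extractor = MarkupLMFeatureExtractor()
    encoding = extractor("<html><body><h1>My First Heading</h1><p>My first paragraph.</p></body></html>")
    print(encoding.nodes)   # [['My First Heading', 'My first paragraph.']]
    print(encoding.xpaths)  # [['/html/body/h1', '/html/body/p']]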
| 159
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
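# Sketch of how a caller inside the library might use `deprecate` to accept a
# renamed keyword for one more release; `old_name`/`new_name` are invented
# example parameters, not real diffusers arguments.
def generate(new_name=None, **kwargs):
    old_name = deprecate("old_name", "99.0.0", "Use `new_name` instead.", take_from=kwargs)
    if old_name is not None:
        new_name = old_name
    return new_name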
| 9
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
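# Sketch of mapping a dataset whose QA columns use different names onto the
# template; "query" and "passage" are illustrative column names.
def _task_template_example():
    template = QuestionAnsweringExtractive(
        question_column="query", context_column="passage", answers_column="answers"
    )
    print(template.column_mapping)  # {'query': 'question', 'passage': 'context', 'answers': 'answers'}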
| 350
|
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    """Read the contents of a compressed file as a filesystem with a single file inside."""

    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz

    def __init__(
        self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs
    ):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None

    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(
        self,
        path: str,
        mode: str = "rb",
        block_size=None,
        autocommit=True,
        cache_options=None,
        **kwargs,
    ):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()


class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(
        self,
        fo: str,
        mode: str = "rb",
        target_protocol: Optional[str] = None,
        target_options: Optional[dict] = None,
        block_size: int = DEFAULT_BLOCK_SIZE,
        **kwargs,
    ):
        super().__init__(
            fo=fo,
            mode=mode,
            target_protocol=target_protocol,
            target_options=target_options,
            block_size=block_size,
            **kwargs,
        )
        # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
        #
        # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
        #     out.close = close
        # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
        #
        # see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
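# Illustrative read of the single file inside a gzip archive via the class
# above; the local path is hypothetical and must point at an existing .gz file.
def _gzip_fs_example():
    fs = GzipFileSystem(fo="./file.txt.gz")
    print(fs.cat("file.txt"))  # decompressed bytes of the archived file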
| 283
| 0
|
"""simple docstring"""
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right


@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids

        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)

        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
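# Simplified illustration of the shift_tokens_right semantics used above: the
# decoder input is the labels shifted one step right with the start token
# prepended (the real helper also replaces -100 label padding with pad ids).
def _shift_right_example():
    import numpy as np

    labels = np.array([[5, 6, 7]])
    decoder_start_token_id = 0
    shifted = np.zeros_like(labels)
    shifted[:, 1:] = labels[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    print(shifted)  # [[0 5 6]]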
| 102
|
"""simple docstring"""
def solution() -> int:
    """Return the product of digits d_1 * d_10 * d_100 * ... * d_1000000 of
    Champernowne's constant 0.123456789101112..."""
    constant = []
    i = 1
    # Each appended str(i) contributes at least one digit, so 1e6 list entries
    # guarantee the joined string has at least 1e6 characters.
    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1
    constant = "".join(constant)
    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution())
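# Sanity check on a small prefix: the concatenation starts "1234567891011...",
# so d_1 = 1 and d_12 = 1 (the first digit of 11).
_prefix = "".join(str(n) for n in range(1, 16))
assert _prefix[0] == "1" and _prefix[11] == "1"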
| 320
| 0
|
import os
import sys
SRC_DIR = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
dependencies = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
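# If this module serves as a torch.hub entry point (it mirrors transformers'
# hubconf.py), consumption would look roughly like this; the repo and
# checkpoint names are the usual examples and may change.
def _torch_hub_example():
    import torch

    tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
    mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")
    return tok, mdl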
| 99
|
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Assume the input already follows the dict / list-of-dicts format.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 99
| 1
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="""%(message)s""")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between the class means."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project the dataset onto its first `dimensions` principal components."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then take only the first ones
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the dataset on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project the dataset onto the `dimensions` most discriminant directions."""
    # Check if the requested dimension is less than the number of classes
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
        assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
        assert error_info.type is AssertionError


if __name__ == "__main__":
    import doctest

    doctest.testmod()
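# Worked example: projecting a 3-feature, 3-sample dataset onto its first two
# principal components; the output keeps one column per sample.
def _pca_example():
    features = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
    projected = principal_component_analysis(features, dimensions=2)
    print(projected.shape)  # (2, 3)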
| 303
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
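# Sketch: instantiate_from_config drives construction from a YAML-style dict;
# the target below is a standard-library class purely for illustration.
def _instantiate_example():
    cfg = {"target": "collections.Counter", "params": {}}
    counter = instantiate_from_config(cfg)
    return counter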
| 303
| 1
|
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list: list[int] = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
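# Quick trace of the recursion above: each pass bubbles one out-of-order
# element forward until the prefix is sorted.
def _trace_example():
    data = [4, 2, 3, 1]
    rec_insertion_sort(data, len(data))
    assert data == [1, 2, 3, 4]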
| 359
|
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 207
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowercase : Dict = logging.get_logger(__name__)
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : str = ["pixel_values"]
def __init__( self : str , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PIL.Image.BICUBIC , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : Union[int, float] = 1 / 2_55 , _lowercase : bool = True , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : int , ):
super().__init__(**_lowercase )
__UpperCAmelCase = size if size is not None else {'''height''': 2_56, '''width''': 2_56}
__UpperCAmelCase = get_size_dict(_lowercase )
__UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
__UpperCAmelCase = get_size_dict(_lowercase , param_name='''crop_size''' )
__UpperCAmelCase = do_resize
__UpperCAmelCase = size
__UpperCAmelCase = resample
__UpperCAmelCase = do_center_crop
__UpperCAmelCase = crop_size
__UpperCAmelCase = do_rescale
__UpperCAmelCase = rescale_factor
__UpperCAmelCase = do_normalize
__UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a ( self : Tuple , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PIL.Image.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Union[str, Any] , ):
__UpperCAmelCase = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return resize(
_lowercase , size=(size['''height'''], size['''width''']) , resample=_lowercase , data_format=_lowercase , **_lowercase )
def a ( self : List[str] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[Any] , ):
__UpperCAmelCase = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must have keys \'height\' and \'width\'. Got {size.keys()}''' )
return center_crop(_lowercase , size=(size['''height'''], size['''width''']) , data_format=_lowercase , **_lowercase )
def a ( self : List[Any] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Any , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def a ( self : Dict , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Tuple , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def a ( self : Optional[int] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : Dict=None , _lowercase : bool = None , _lowercase : Dict[str, int] = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : ChannelDimension = ChannelDimension.FIRST , **_lowercase : List[Any] , ):
__UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase = resample if resample is not None else self.resample
__UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
__UpperCAmelCase = image_std if image_std is not None else self.image_std
__UpperCAmelCase = size if size is not None else self.size
__UpperCAmelCase = get_size_dict(_lowercase )
__UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
__UpperCAmelCase = get_size_dict(_lowercase , param_name='''crop_size''' )
__UpperCAmelCase = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__UpperCAmelCase = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
__UpperCAmelCase = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_center_crop:
__UpperCAmelCase = [self.center_crop(image=_lowercase , size=_lowercase ) for image in images]
if do_rescale:
__UpperCAmelCase = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
__UpperCAmelCase = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
__UpperCAmelCase = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
__UpperCAmelCase = {'''pixel_values''': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
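# Usage note (illustrative comment only; the class and method names above are
# dataset placeholders): with the defaults, preprocessing resizes an input to
# 256x256 (bicubic), center-crops to 224x224, rescales by 1/255, normalizes
# with the ImageNet mean/std, and returns a BatchFeature whose "pixel_values"
# entry has shape (batch, 3, 224, 224) in channels-first order.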
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( snake_case_ :list[float] , snake_case_ :list[float] ):
__UpperCAmelCase = sorted(numsa + numsa )
__UpperCAmelCase , __UpperCAmelCase = divmod(len(snake_case_ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : int = [float(x) for x in input('Enter the elements of first array: ').split()]
_lowercase : Tuple = [float(x) for x in input('Enter the elements of second array: ').split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""")
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = StableDiffusionInpaintPipeline
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
SCREAMING_SNAKE_CASE = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
SCREAMING_SNAKE_CASE = frozenset([] )
def _lowerCAmelCase( self ) -> int:
torch.manual_seed(0 )
        lowercase__ : Optional[int] = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_SCREAMING_SNAKE_CASE , )
lowercase__ : List[str] = PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
torch.manual_seed(0 )
lowercase__ : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowercase__ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''gelu''' , projection_dim=512 , )
lowercase__ : Optional[int] = CLIPTextModel(_SCREAMING_SNAKE_CASE )
lowercase__ : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__ : List[str] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _lowerCAmelCase( self , __lowerCAmelCase , __lowerCAmelCase=0 ) -> Optional[Any]:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
lowercase__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        lowercase__ : List[Any] = Image.fromarray(np.uint8(_SCREAMING_SNAKE_CASE ) ).convert('''RGB''' ).resize((64, 64) )
        lowercase__ : List[str] = Image.fromarray(np.uint8(image + 4 ) ).convert('''RGB''' ).resize((64, 64) )
if str(_SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
lowercase__ : int = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
lowercase__ : Union[str, Any] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def _lowerCAmelCase( self ) -> Tuple:
lowercase__ : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ : Dict = self.get_dummy_components()
lowercase__ : int = StableDiffusionInpaintPipeline(**_SCREAMING_SNAKE_CASE )
lowercase__ : str = sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
lowercase__ : Union[str, Any] = sd_pipe(**_SCREAMING_SNAKE_CASE ).images
lowercase__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
        lowercase__ : Optional[int] = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCAmelCase( self ) -> List[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase( self ) -> List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase( self ) -> Tuple:
lowercase__ : Dict = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
lowercase__ : Any = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
lowercase__ : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench.npy''' )
lowercase__ : Optional[int] = "stabilityai/stable-diffusion-2-inpainting"
lowercase__ : Dict = StableDiffusionInpaintPipeline.from_pretrained(_SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
lowercase__ : Optional[int] = "Face of a yellow cat, high resolution, sitting on a park bench"
lowercase__ : int = torch.manual_seed(0 )
lowercase__ : Dict = pipe(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , mask_image=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , output_type='''np''' , )
lowercase__ : Union[str, Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9E-3
def _lowerCAmelCase( self ) -> str:
lowercase__ : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
lowercase__ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
lowercase__ : str = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint'''
'''/yellow_cat_sitting_on_a_park_bench_fp16.npy''' )
lowercase__ : Dict = "stabilityai/stable-diffusion-2-inpainting"
lowercase__ : Optional[Any] = StableDiffusionInpaintPipeline.from_pretrained(
            _SCREAMING_SNAKE_CASE , torch_dtype=torch.float16 , safety_checker=_SCREAMING_SNAKE_CASE , )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
lowercase__ : List[str] = "Face of a yellow cat, high resolution, sitting on a park bench"
lowercase__ : int = torch.manual_seed(0 )
lowercase__ : Optional[Any] = pipe(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , mask_image=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , output_type='''np''' , )
lowercase__ : str = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def _lowerCAmelCase( self ) -> int:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/sd2-inpaint/init_image.png''' )
lowercase__ : Tuple = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png''' )
lowercase__ : Any = "stabilityai/stable-diffusion-2-inpainting"
lowercase__ : Dict = PNDMScheduler.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder='''scheduler''' )
lowercase__ : List[Any] = StableDiffusionInpaintPipeline.from_pretrained(
            _SCREAMING_SNAKE_CASE , safety_checker=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , torch_dtype=torch.float16 , )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
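        # (added note) sequential CPU offload keeps only one submodule on the
        # GPU at a time, and attention slicing computes one slice at a time;
        # together they bound the peak VRAM checked by the assertion below.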
lowercase__ : List[str] = "Face of a yellow cat, high resolution, sitting on a park bench"
lowercase__ : List[Any] = torch.manual_seed(0 )
lowercase__ : Any = pipe(
prompt=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , mask_image=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' , )
lowercase__ : Union[str, Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
'''simple docstring'''
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class UpperCAmelCase ( datasets.BuilderConfig ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = "utf-8"
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = True # deprecated
SCREAMING_SNAKE_CASE = None # deprecated
    SCREAMING_SNAKE_CASE = 10 << 20  # 10MB
SCREAMING_SNAKE_CASE = None
class UpperCAmelCase ( datasets.ArrowBasedBuilder ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = JsonConfig
def _lowerCAmelCase( self ) -> Any:
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
lowercase__ : Tuple = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Optional[Any]:
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
lowercase__ : List[str] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(__lowerCAmelCase , (str, list, tuple) ):
lowercase__ : List[str] = data_files
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase__ : Union[str, Any] = [files]
lowercase__ : Dict = [dl_manager.iter_files(__lowerCAmelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
lowercase__ : str = []
for split_name, files in data_files.items():
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
lowercase__ : List[str] = [files]
lowercase__ : Optional[Any] = [dl_manager.iter_files(__lowerCAmelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=__lowerCAmelCase , gen_kwargs={'''files''': files} ) )
return splits
def _lowerCAmelCase( self , __lowerCAmelCase ) -> pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
lowercase__ : Optional[int] = self.config.features.arrow_schema.field(__lowerCAmelCase ).type
lowercase__ : Union[str, Any] = pa_table.append_column(__lowerCAmelCase , pa.array([None] * len(__lowerCAmelCase ) , type=__lowerCAmelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
lowercase__ : Dict = table_cast(__lowerCAmelCase , self.config.features.arrow_schema )
return pa_table
def _lowerCAmelCase( self , __lowerCAmelCase ) -> Optional[int]:
for file_idx, file in enumerate(itertools.chain.from_iterable(__lowerCAmelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(__lowerCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
lowercase__ : Dict = json.load(__lowerCAmelCase )
# We keep only the field we are interested in
lowercase__ : Any = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(__lowerCAmelCase , (list, tuple) ):
lowercase__ : List[Any] = set().union(*[row.keys() for row in dataset] )
lowercase__ : List[Any] = {col: [row.get(__lowerCAmelCase ) for row in dataset] for col in keys}
else:
lowercase__ : Union[str, Any] = dataset
lowercase__ : Optional[int] = pa.Table.from_pydict(__lowerCAmelCase )
yield file_idx, self._cast_table(__lowerCAmelCase )
# If the file has one json object per line
else:
with open(__lowerCAmelCase , '''rb''' ) as f:
lowercase__ : Optional[int] = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
lowercase__ : List[str] = max(self.config.chunksize // 32 , 16 << 10 )
lowercase__ : Union[str, Any] = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
lowercase__ : Tuple = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(__lowerCAmelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
lowercase__ : Dict = batch.decode(self.config.encoding , errors=__lowerCAmelCase ).encode('''utf-8''' )
try:
while True:
try:
lowercase__ : str = paj.read_json(
io.BytesIO(__lowerCAmelCase ) , read_options=paj.ReadOptions(block_size=__lowerCAmelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(__lowerCAmelCase , pa.ArrowInvalid )
and "straddling" not in str(__lowerCAmelCase )
or block_size > len(__lowerCAmelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(__lowerCAmelCase )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
__lowerCAmelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
lowercase__ : Optional[int] = json.load(__lowerCAmelCase )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(__lowerCAmelCase )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(__lowerCAmelCase , __lowerCAmelCase ): # list is the only sequence type supported in JSON
try:
lowercase__ : List[Any] = set().union(*[row.keys() for row in dataset] )
lowercase__ : str = {col: [row.get(__lowerCAmelCase ) for row in dataset] for col in keys}
lowercase__ : Optional[int] = pa.Table.from_pydict(__lowerCAmelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(__lowerCAmelCase )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(__lowerCAmelCase )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(__lowerCAmelCase )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(__lowerCAmelCase )
batch_idx += 1
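# Standalone sketch (added for illustration, not part of the loader above) of
# the retry loop used in the generator: pyarrow fails when a JSON record
# straddles two read blocks, so the block size is doubled until the batch
# parses. The real loader additionally re-raises right away for errors that
# are unrelated to block straddling.
def _read_json_batch_with_growing_block_size(batch: bytes, block_size: int):
    import io

    import pyarrow as pa
    import pyarrow.json as paj

    while True:
        try:
            return paj.read_json(io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
        except (pa.ArrowInvalid, pa.ArrowNotImplementedError):
            if block_size > len(batch):
                raise  # growing further cannot help; surface the parse error
            block_size *= 2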
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    # Left-pad with zeros so the length is a multiple of 3
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    # Each 3-bit group maps to one octal digit
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string


if __name__ == "__main__":
    from doctest import testmod

    testmod()
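    # Worked example (illustrative addition): "1111" pads to "001111", which
    # splits into "001" and "111", i.e. octal "17".
    assert bin_to_octal("1111") == "17"
    assert bin_to_octal("101010101010") == "5252"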
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = tempfile.mkdtemp()
# fmt: off
lowerCamelCase : Any = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCamelCase : List[Any] = dict(zip(__A , range(len(__A ) ) ) )
lowerCamelCase : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowerCamelCase : Optional[Any] = {"unk_token": "<unk>"}
lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
lowerCamelCase : str = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
lowerCamelCase : str = os.path.join(self.tmpdirname , __A )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__A , __A )
def _snake_case ( self , **__A ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **__A )
def _snake_case ( self , **__A ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **__A )
def _snake_case ( self , **__A ):
"""simple docstring"""
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__A )
def _snake_case ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ):
"""simple docstring"""
        lowerCamelCase : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
lowerCamelCase : Tuple = [Image.fromarray(np.moveaxis(__A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.get_tokenizer()
lowerCamelCase : Optional[Any] = self.get_rust_tokenizer()
lowerCamelCase : Tuple = self.get_image_processor()
lowerCamelCase : List[Any] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__A )
lowerCamelCase : Optional[int] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __A )
self.assertIsInstance(processor_fast.tokenizer , __A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __A )
self.assertIsInstance(processor_fast.image_processor , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase : int = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCamelCase : List[str] = self.get_image_processor(do_normalize=__A )
lowerCamelCase : Optional[int] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.get_image_processor()
lowerCamelCase : Optional[int] = self.get_tokenizer()
lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Tuple = self.prepare_image_inputs()
lowerCamelCase : int = image_processor(__A , return_tensors="np" )
lowerCamelCase : Union[str, Any] = processor(images=__A , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.get_image_processor()
lowerCamelCase : Dict = self.get_tokenizer()
lowerCamelCase : Union[str, Any] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Tuple = "lower newer"
lowerCamelCase : Union[str, Any] = processor(text=__A , return_tensors="np" )
lowerCamelCase : List[Any] = tokenizer(__A , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = self.get_image_processor()
lowerCamelCase : Any = self.get_tokenizer()
lowerCamelCase : int = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Optional[Any] = "lower newer"
lowerCamelCase : Dict = self.prepare_image_inputs()
lowerCamelCase : Any = processor(text=__A , images=__A )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = "google/owlvit-base-patch32"
lowerCamelCase : List[Any] = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : Tuple = ["cat", "nasa badge"]
lowerCamelCase : str = processor(text=__A )
lowerCamelCase : Union[str, Any] = 16
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = "google/owlvit-base-patch32"
lowerCamelCase : Optional[int] = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : Dict = [["cat", "nasa badge"], ["person"]]
lowerCamelCase : int = processor(text=__A )
lowerCamelCase : Tuple = 16
lowerCamelCase : Any = len(__A )
lowerCamelCase : Optional[Any] = max([len(__A ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = "google/owlvit-base-patch32"
lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : List[Any] = ["cat", "nasa badge"]
lowerCamelCase : Optional[Any] = processor(text=__A )
lowerCamelCase : int = 16
lowerCamelCase : List[str] = inputs["input_ids"]
        lowerCamelCase : int = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = self.get_image_processor()
lowerCamelCase : List[str] = self.get_tokenizer()
lowerCamelCase : str = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Dict = self.prepare_image_inputs()
lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
lowerCamelCase : Any = processor(images=__A , query_images=__A )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = self.get_image_processor()
lowerCamelCase : Optional[int] = self.get_tokenizer()
lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase : List[Any] = processor.batch_decode(__A )
lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class a_ ( datasets.BuilderConfig ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[datasets.Features] = None
__SCREAMING_SNAKE_CASE : str = "utf-8"
__SCREAMING_SNAKE_CASE : Optional[str] = None
__SCREAMING_SNAKE_CASE : Optional[str] = None
__SCREAMING_SNAKE_CASE : bool = True # deprecated
__SCREAMING_SNAKE_CASE : Optional[int] = None # deprecated
__SCREAMING_SNAKE_CASE : int = 10 << 20 # 10MB
__SCREAMING_SNAKE_CASE : Optional[bool] = None
class a_ ( datasets.ArrowBasedBuilder ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = JsonConfig
def __lowerCAmelCase ( self ) ->Dict:
if self.config.block_size is not None:
logger.warning('''The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead''' )
SCREAMING_SNAKE_CASE : List[str] = self.config.block_size
if self.config.use_threads is not True:
logger.warning(
'''The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.''' )
if self.config.newlines_in_values is not None:
raise ValueError('''The JSON loader parameter `newlines_in_values` is no longer supported''' )
return datasets.DatasetInfo(features=self.config.features )
def __lowerCAmelCase ( self , _lowerCamelCase ) ->int:
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(_lowerCamelCase , (str, list, tuple) ):
SCREAMING_SNAKE_CASE : Dict = data_files
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : List[str] = [files]
SCREAMING_SNAKE_CASE : Tuple = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
SCREAMING_SNAKE_CASE : Any = []
for split_name, files in data_files.items():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
SCREAMING_SNAKE_CASE : Optional[int] = [files]
SCREAMING_SNAKE_CASE : List[Any] = [dl_manager.iter_files(_lowerCamelCase ) for file in files]
splits.append(datasets.SplitGenerator(name=_lowerCamelCase , gen_kwargs={'''files''': files} ) )
return splits
def __lowerCAmelCase ( self , _lowerCamelCase ) ->pa.Table:
if self.config.features is not None:
# adding missing columns
for column_name in set(self.config.features ) - set(pa_table.column_names ):
SCREAMING_SNAKE_CASE : int = self.config.features.arrow_schema.field(_lowerCamelCase ).type
SCREAMING_SNAKE_CASE : Any = pa_table.append_column(_lowerCamelCase , pa.array([None] * len(_lowerCamelCase ) , type=_lowerCamelCase ) )
# more expensive cast to support nested structures with keys in a different order
# allows str <-> int/float or str to Audio for example
SCREAMING_SNAKE_CASE : str = table_cast(_lowerCamelCase , self.config.features.arrow_schema )
return pa_table
def __lowerCAmelCase ( self , _lowerCamelCase ) ->Optional[Any]:
for file_idx, file in enumerate(itertools.chain.from_iterable(_lowerCamelCase ) ):
# If the file is one json object and if we need to look at the list of items in one specific field
if self.config.field is not None:
with open(_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
SCREAMING_SNAKE_CASE : List[str] = json.load(_lowerCamelCase )
# We keep only the field we are interested in
SCREAMING_SNAKE_CASE : Optional[int] = dataset[self.config.field]
# We accept two format: a list of dicts or a dict of lists
if isinstance(_lowerCamelCase , (list, tuple) ):
SCREAMING_SNAKE_CASE : List[Any] = set().union(*[row.keys() for row in dataset] )
SCREAMING_SNAKE_CASE : int = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys}
else:
SCREAMING_SNAKE_CASE : int = dataset
SCREAMING_SNAKE_CASE : str = pa.Table.from_pydict(_lowerCamelCase )
yield file_idx, self._cast_table(_lowerCamelCase )
# If the file has one json object per line
else:
with open(_lowerCamelCase , '''rb''' ) as f:
SCREAMING_SNAKE_CASE : int = 0
# Use block_size equal to the chunk size divided by 32 to leverage multithreading
# Set a default minimum value of 16kB if the chunk size is really small
SCREAMING_SNAKE_CASE : int = max(self.config.chunksize // 32 , 16 << 10 )
SCREAMING_SNAKE_CASE : Optional[int] = (
self.config.encoding_errors if self.config.encoding_errors is not None else '''strict'''
)
while True:
SCREAMING_SNAKE_CASE : str = f.read(self.config.chunksize )
if not batch:
break
# Finish current line
try:
batch += f.readline()
except (AttributeError, io.UnsupportedOperation):
batch += readline(_lowerCamelCase )
# PyArrow only accepts utf-8 encoded bytes
if self.config.encoding != "utf-8":
SCREAMING_SNAKE_CASE : Optional[Any] = batch.decode(self.config.encoding , errors=_lowerCamelCase ).encode('''utf-8''' )
try:
while True:
try:
SCREAMING_SNAKE_CASE : str = paj.read_json(
io.BytesIO(_lowerCamelCase ) , read_options=paj.ReadOptions(block_size=_lowerCamelCase ) )
break
except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
if (
isinstance(_lowerCamelCase , pa.ArrowInvalid )
and "straddling" not in str(_lowerCamelCase )
or block_size > len(_lowerCamelCase )
):
raise
else:
# Increase the block size in case it was too small.
# The block size will be reset for the next file.
logger.debug(
F"""Batch of {len(_lowerCamelCase )} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.""" )
block_size *= 2
except pa.ArrowInvalid as e:
try:
with open(
_lowerCamelCase , encoding=self.config.encoding , errors=self.config.encoding_errors ) as f:
SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(_lowerCamelCase )
except json.JSONDecodeError:
logger.error(F"""Failed to read file '{file}' with error {type(_lowerCamelCase )}: {e}""" )
raise e
# If possible, parse the file as a list of json objects and exit the loop
if isinstance(_lowerCamelCase , _lowerCamelCase ): # list is the only sequence type supported in JSON
try:
SCREAMING_SNAKE_CASE : Tuple = set().union(*[row.keys() for row in dataset] )
SCREAMING_SNAKE_CASE : Optional[Any] = {col: [row.get(_lowerCamelCase ) for row in dataset] for col in keys}
SCREAMING_SNAKE_CASE : str = pa.Table.from_pydict(_lowerCamelCase )
except (pa.ArrowInvalid, AttributeError) as e:
logger.error(F"""Failed to read file '{file}' with error {type(_lowerCamelCase )}: {e}""" )
raise ValueError(F"""Not able to read records in the JSON file at {file}.""" ) from None
yield file_idx, self._cast_table(_lowerCamelCase )
break
else:
logger.error(F"""Failed to read file '{file}' with error {type(_lowerCamelCase )}: {e}""" )
raise ValueError(
F"""Not able to read records in the JSON file at {file}. """
F"""You should probably indicate the field of the JSON file containing your records. """
F"""This JSON file contain the following fields: {str(list(dataset.keys() ) )}. """
F"""Select the correct one and provide it as `field='XXX'` to the dataset loading method. """ ) from None
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(_lowerCamelCase )
batch_idx += 1
import datasets
from .evaluate import evaluate
_CITATION = '''\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
'''
_DESCRIPTION = '''
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
'''
_KWARGS_DESCRIPTION = '''
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair as given in the references (see below)
- \'prediction_text\': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- \'id\': id of the question-answer pair (see above),
- \'answers\': a Dict in the CUAD dataset format
{
\'text\': list of possible texts for the answer, as a list of strings
\'answer_start\': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
\'exact_match\': Exact match (the normalized answer exactly match the gold answer)
\'f1\': The F-score of predicted tokens versus the gold answer
\'aupr\': Area Under the Precision-Recall curve
\'prec_at_80_recall\': Precision at 80% recall
\'prec_at_90_recall\': Precision at 90% recall
Examples:
>>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]
>>> cuad_metric = datasets.load_metric("cuad")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
"""simple docstring"""
def __lowerCAmelCase ( self ) ->Tuple:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://www.atticusprojectai.org/cuad'''] , reference_urls=['''https://www.atticusprojectai.org/cuad'''] , )
def __lowerCAmelCase ( self , _lowerCamelCase , _lowerCamelCase ) ->Tuple:
SCREAMING_SNAKE_CASE : Any = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
SCREAMING_SNAKE_CASE : int = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
SCREAMING_SNAKE_CASE : Dict = evaluate(dataset=_lowerCamelCase , predictions=_lowerCamelCase )
return score
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` names the template; the schemas describe the expected columns
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
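# Usage sketch (illustrative comment): the template renames dataset-specific
# columns onto the canonical ("text", "summary") pair, e.g.
#   Summarization(text_column="article", summary_column="highlights").column_mapping
#   -> {"article": "text", "highlights": "summary"}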
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class A__ :
"""simple docstring"""
def __init__( self , lowercase , lowercase=13 , lowercase=64 , lowercase=2 , lowercase=3 , lowercase=True , lowercase=True , lowercase=32 , lowercase=5 , lowercase=4 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=10 , lowercase=0.02 , lowercase=[1, 16, 4, 4] , lowercase=None , ) -> List[Any]:
'''simple docstring'''
a__ : Optional[int] = parent
a__ : Optional[int] = batch_size
a__ : Any = image_size
a__ : Optional[Any] = patch_size
a__ : Optional[Any] = num_channels
a__ : int = is_training
a__ : List[str] = use_labels
a__ : List[str] = hidden_size
a__ : Tuple = num_hidden_layers
a__ : Optional[Any] = num_attention_heads
a__ : Union[str, Any] = intermediate_size
a__ : Optional[int] = hidden_act
a__ : Optional[Any] = hidden_dropout_prob
a__ : Any = attention_probs_dropout_prob
a__ : Any = type_sequence_label_size
a__ : Tuple = initializer_range
a__ : Tuple = scope
a__ : int = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
a__ : Any = (self.image_size // 32) ** 2
a__ : List[Any] = num_patches + 1
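        # Worked example (added): with the tester default image_size=64, the
        # stride-32 backbone yields a 2x2 feature map, so num_patches = 4 and
        # the sequence length is 4 + 1 = 5 including the [CLS] token.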
def __lowercase ( self) -> Any:
'''simple docstring'''
a__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__ : int = None
if self.use_labels:
a__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a__ : List[str] = self.get_config()
return config, pixel_values, labels
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ : List[str] = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=lowercase , )
def __lowercase ( self , lowercase , lowercase , lowercase) -> List[str]:
'''simple docstring'''
a__ : List[str] = ViTHybridModel(config=lowercase)
model.to(lowercase)
model.eval()
a__ : Union[str, Any] = model(lowercase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def __lowercase ( self , lowercase , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
a__ : Dict = self.type_sequence_label_size
a__ : Union[str, Any] = ViTHybridForImageClassification(lowercase)
model.to(lowercase)
model.eval()
a__ : Tuple = model(lowercase , labels=lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def __lowercase ( self) -> Any:
'''simple docstring'''
a__ : List[str] = self.prepare_config_and_inputs()
a__ , a__ , a__ : Union[str, Any] = config_and_inputs
a__ : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
__A : Optional[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
__A : List[str] = (
{'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
__A : Any = False
__A : Optional[int] = False
__A : Optional[Any] = False
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : Any = ViTHybridModelTester(self)
a__ : Any = ConfigTester(self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37)
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def __lowercase ( self) -> Dict:
'''simple docstring'''
pass
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ , a__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : str = model_class(lowercase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear))
def __lowercase ( self) -> int:
'''simple docstring'''
a__ , a__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : Union[str, Any] = model_class(lowercase)
a__ : Union[str, Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Optional[Any] = [*signature.parameters.keys()]
a__ : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase)
def __lowercase ( self) -> Any:
'''simple docstring'''
a__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase)
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase)
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ , a__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
a__ : Tuple = _config_zero_init(lowercase)
for model_class in self.all_model_classes:
a__ : List[Any] = model_class(config=lowercase)
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
a__ : Dict = [F'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def __lowercase ( self) -> Any:
'''simple docstring'''
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Optional[Any] = ViTHybridModel.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
def A_ ( ) -> int:
a__ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class A__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def __lowercase ( self) -> Any:
'''simple docstring'''
a__ : List[str] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
lowercase)
a__ : List[str] = self.default_image_processor
a__ : List[Any] = prepare_img()
a__ : Any = image_processor(images=lowercase , return_tensors='pt').to(lowercase)
# forward pass
with torch.no_grad():
a__ : Optional[Any] = model(**lowercase)
# verify the logits
a__ : Optional[Any] = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , lowercase)
        a__ : Any = torch.tensor([-1.9090, -0.4993, -0.2389]).to(lowercase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1e-4))
@slow
@require_accelerate
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
a__ : List[str] = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384')
a__ : Union[str, Any] = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto')
a__ : Any = prepare_img()
a__ : str = image_processor(images=lowercase , return_tensors='pt')
a__ : List[Any] = model(**lowercase)
a__ : int = outputs.logits
# model predicts one of the 1000 ImageNet classes
a__ : List[str] = logits.argmax(-1).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , 'tabby, tabby cat')
"""simple docstring"""
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase ):
_lowerCamelCase : Optional[Any] = n
_lowerCamelCase : Tuple = [None] * self.n
_lowerCamelCase : Any = 0 # index of the first element
_lowerCamelCase : Any = 0
_lowerCamelCase : Optional[int] = 0
def __len__( self ):
return self.size
def A_ ( self ):
return self.size == 0
def A_ ( self ):
return False if self.is_empty() else self.array[self.front]
def A_ ( self , lowercase ):
if self.size >= self.n:
raise Exception('QUEUE IS FULL' )
_lowerCamelCase : str = data
_lowerCamelCase : Optional[Any] = (self.rear + 1) % self.n
self.size += 1
return self
def A_ ( self ):
if self.size == 0:
raise Exception('UNDERFLOW' )
_lowerCamelCase : Union[str, Any] = self.array[self.front]
_lowerCamelCase : List[Any] = None
_lowerCamelCase : Union[str, Any] = (self.front + 1) % self.n
self.size -= 1
return temp
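

# Minimal usage sketch (illustrative addition):
if __name__ == "__main__":
    queue = CircularQueue(3)
    queue.enqueue(1).enqueue(2).enqueue(3)  # enqueue returns self, so calls chain
    assert queue.dequeue() == 1  # FIFO order
    queue.enqueue(4)  # rear has wrapped around to the freed slot
    assert len(queue) == 3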
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowercase__ = False
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
_lowerCamelCase : Tuple = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' )
pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
_lowerCamelCase : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
_lowerCamelCase : Dict = torch.manual_seed(0 )
_lowerCamelCase : Dict = pipe(
image=lowercase , generator=lowercase , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
_lowerCamelCase : str = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
        _lowerCamelCase : Any = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class UpperCAmelCase_ :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : Optional[int]=7 , UpperCAmelCase__ : int=True , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : str=False , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : List[str]=9_9 , UpperCAmelCase__ : Union[str, Any]=3_2 , UpperCAmelCase__ : Optional[Any]=5 , UpperCAmelCase__ : List[Any]=4 , UpperCAmelCase__ : str=3_7 , UpperCAmelCase__ : Optional[int]="gelu" , UpperCAmelCase__ : str=0.1 , UpperCAmelCase__ : Optional[int]=0.1 , UpperCAmelCase__ : Dict=5_1_2 , UpperCAmelCase__ : int=1_6 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Dict=0.02 , UpperCAmelCase__ : int=3 , UpperCAmelCase__ : int=4 , UpperCAmelCase__ : Any=None , ) -> Dict:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = seq_length
lowerCAmelCase = is_training
lowerCAmelCase = use_input_mask
lowerCAmelCase = use_token_type_ids
lowerCAmelCase = use_labels
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = initializer_range
lowerCAmelCase = num_labels
lowerCAmelCase = num_choices
lowerCAmelCase = scope
    def prepare_config_and_inputs(self):
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase = None
if self.use_input_mask:
lowerCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase__ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=UpperCAmelCase__ , )
def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[int] ) -> Dict:
lowerCAmelCase = FalconModel(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
lowerCAmelCase = model(UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , ) -> Optional[int]:
lowerCAmelCase = True
lowerCAmelCase = FalconModel(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , )
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , )
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : str , UpperCAmelCase__ : Tuple , ) -> Tuple:
lowerCAmelCase = FalconForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Any , ) -> Optional[int]:
lowerCAmelCase = True
lowerCAmelCase = True
lowerCAmelCase = FalconForCausalLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
# first forward pass
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , use_cache=UpperCAmelCase__ , )
lowerCAmelCase = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowerCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCAmelCase = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowerCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase = torch.cat([input_mask, next_mask] , dim=-1 )
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['hidden_states'][0]
lowerCAmelCase = model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , encoder_hidden_states=UpperCAmelCase__ , encoder_attention_mask=UpperCAmelCase__ , past_key_values=UpperCAmelCase__ , output_hidden_states=UpperCAmelCase__ , )['hidden_states'][0]
# select random slice
lowerCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase = output_from_no_past[:, -3:, random_slice_idx].detach()
lowerCAmelCase = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(UpperCAmelCase__ , UpperCAmelCase__ , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'''feature-extraction''': FalconModel,
'''text-classification''': FalconForSequenceClassification,
'''text-generation''': FalconForCausalLM,
'''question-answering''': FalconForQuestionAnswering,
'''token-classification''': FalconForTokenClassification,
'''zero-shot''': FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase : str = False
lowerCamelCase : Tuple = False
    def setUp(self):
lowerCAmelCase = FalconModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=UpperCAmelCase__ , hidden_size=3_7 )
    def test_config(self):
self.config_tester.run_common_tests()
    def test_model(self):
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
lowerCAmelCase , *lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
lowerCAmelCase = alibi
self.model_tester.create_and_check_model(UpperCAmelCase__ , *UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(UpperCAmelCase__ )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = FalconForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = 'single_label_classification'
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(UpperCAmelCase__ )
lowerCAmelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
lowerCAmelCase = FalconForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : Tuple ) -> int:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = FalconForCausalLM(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , use_cache=UpperCAmelCase__ )
lowerCAmelCase = input_ids.shape[0]
lowerCAmelCase = model._convert_to_rw_cache(result.past_key_values )
lowerCAmelCase = model._convert_cache_to_standard_format(UpperCAmelCase__ , UpperCAmelCase__ )
for layer in range(len(UpperCAmelCase__ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def __UpperCAmelCase ( self : int ) -> Dict:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = 'multi_label_classification'
lowerCAmelCase = input_dict['input_ids']
lowerCAmelCase = input_ids.ne(1 ).to(UpperCAmelCase__ )
lowerCAmelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
lowerCAmelCase = FalconForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowerCAmelCase = model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
lowerCAmelCase , lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(UpperCAmelCase__ , 'use_cache' ):
return
lowerCAmelCase = model_class(UpperCAmelCase__ ).to(UpperCAmelCase__ )
if "use_cache" not in inputs:
lowerCAmelCase = True
lowerCAmelCase = model(**UpperCAmelCase__ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
lowerCAmelCase = (
getattr(UpperCAmelCase__ , 'decoder_layers' , UpperCAmelCase__ )
or getattr(UpperCAmelCase__ , 'num_decoder_layers' , UpperCAmelCase__ )
or config.num_hidden_layers
)
lowerCAmelCase = getattr(UpperCAmelCase__ , 'num_kv_heads' , config.num_attention_heads )
lowerCAmelCase = getattr(UpperCAmelCase__ , 'd_model' , config.hidden_size )
lowerCAmelCase = embed_dim // num_attention_heads
lowerCAmelCase = outputs['past_key_values']
self.assertEqual(len(UpperCAmelCase__ ) , UpperCAmelCase__ )
lowerCAmelCase , lowerCAmelCase = inputs['input_ids'].shape
for i in range(UpperCAmelCase__ ):
if config.new_decoder_architecture:
lowerCAmelCase = config.num_attention_heads
elif config.multi_query:
lowerCAmelCase = 1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
@slow
def __UpperCAmelCase ( self : Optional[int] ) -> List[str]:
lowerCAmelCase = AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' )
lowerCAmelCase = FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' )
model.eval()
model.to(UpperCAmelCase__ )
lowerCAmelCase = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase__ )
lowerCAmelCase = (
'My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday.'
)
lowerCAmelCase = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=1_9 )
lowerCAmelCase = tokenizer.batch_decode(UpperCAmelCase__ )[0]
self.assertEqual(UpperCAmelCase__ , UpperCAmelCase__ )
@slow
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = FalconForCausalLM.from_pretrained(UpperCAmelCase__ )
model.eval()
model.to(UpperCAmelCase__ )
lowerCAmelCase = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase__ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=4 )
model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=4 )
model.generate(**UpperCAmelCase__ , num_beams=2 , max_new_tokens=4 )
@slow
def __UpperCAmelCase ( self : Dict ) -> Any:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
lowerCAmelCase = AutoTokenizer.from_pretrained(UpperCAmelCase__ )
lowerCAmelCase = FalconForCausalLM.from_pretrained(UpperCAmelCase__ )
model.eval()
model.to(device=UpperCAmelCase__ )
lowerCAmelCase = tokenizer('My favorite food is' , return_tensors='pt' ).to(UpperCAmelCase__ )
# Test results are the same with and without cache
lowerCAmelCase = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=2_0 , use_cache=UpperCAmelCase__ )
lowerCAmelCase = model.generate(**UpperCAmelCase__ , do_sample=UpperCAmelCase__ , max_new_tokens=2_0 , use_cache=UpperCAmelCase__ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
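# A minimal sketch of the same generate flow outside the test harness. The tiny
# random checkpoint name is taken from the tests above; any other Falcon
# causal-LM checkpoint would be used the same way.
import torch
from transformers import AutoTokenizer, FalconForCausalLM

repo = 'Rocketknight1/tiny-random-falcon-7b'
tokenizer = AutoTokenizer.from_pretrained(repo)
model = FalconForCausalLM.from_pretrained(repo)
model.eval()

inputs = tokenizer('My favorite food is', return_tensors='pt')
with torch.no_grad():
    # Greedy decoding; use_cache=True reuses past key/values between steps
    output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=4, use_cache=True)
print(tokenizer.batch_decode(output_ids)[0])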
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
"""simple docstring"""
def __init__( self : Optional[Any], lowerCamelCase : Optional[int], lowerCamelCase : str=14, lowerCamelCase : List[str]=7, lowerCamelCase : Dict=True, lowerCamelCase : List[str]=True, lowerCamelCase : Optional[Any]=True, lowerCamelCase : Optional[Any]=True, lowerCamelCase : Any=True, lowerCamelCase : List[str]=99, lowerCamelCase : Optional[Any]=32, lowerCamelCase : Optional[int]=5, lowerCamelCase : List[Any]=4, lowerCamelCase : List[Any]=37, lowerCamelCase : List[str]="gelu", lowerCamelCase : Any=0.1, lowerCamelCase : Optional[Any]=0.1, lowerCamelCase : List[Any]=512, lowerCamelCase : List[str]=16, lowerCamelCase : Dict=2, lowerCamelCase : Union[str, Any]=0.02, lowerCamelCase : Optional[int]=3, lowerCamelCase : Dict=4, lowerCamelCase : List[Any]=None, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_token_type_ids
lowercase__ = use_input_mask
lowercase__ = use_labels
lowercase__ = use_mc_token_ids
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
lowercase__ = self.vocab_size - 1
    def prepare_config_and_inputs(self):
'''simple docstring'''
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_token_type_ids:
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
lowercase__ = None
if self.use_mc_token_ids:
lowercase__ = ids_tensor([self.batch_size, self.num_choices], self.seq_length )
lowercase__ = None
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
lowercase__ = ids_tensor([self.batch_size], self.num_choices )
lowercase__ = self.get_config()
lowercase__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
    def get_config(self):
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : Any, lowerCamelCase : Tuple, lowerCamelCase : Any, lowerCamelCase : List[Any], lowerCamelCase : Union[str, Any], *lowerCamelCase : Dict ):
'''simple docstring'''
lowercase__ = CTRLModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
model(lowerCamelCase, token_type_ids=lowerCamelCase, head_mask=lowerCamelCase )
model(lowerCamelCase, token_type_ids=lowerCamelCase )
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ), config.n_layer )
def lowercase__ ( self : Optional[Any], lowerCamelCase : Union[str, Any], lowerCamelCase : Tuple, lowerCamelCase : Union[str, Any], lowerCamelCase : Union[str, Any], lowerCamelCase : Optional[int], *lowerCamelCase : Any ):
'''simple docstring'''
lowercase__ = CTRLLMHeadModel(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
        return config, inputs_dict
def lowercase__ ( self : List[str], lowerCamelCase : Tuple, lowerCamelCase : Optional[Any], lowerCamelCase : int, lowerCamelCase : Any, *lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = CTRLForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowercase__ = model(lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
"""feature-extraction""": CTRLModel,
"""text-classification""": CTRLForSequenceClassification,
"""text-generation""": CTRLLMHeadModel,
"""zero-shot""": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase__ = True
lowercase__ = False
lowercase__ = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
    def setUp(self):
'''simple docstring'''
lowercase__ = CTRLModelTester(self )
lowercase__ = ConfigTester(self, config_class=lowerCamelCase, n_embd=37 )
    def tearDown(self):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
    def test_config(self):
'''simple docstring'''
self.config_tester.run_common_tests()
    def test_ctrl_model(self):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*lowerCamelCase )
    def test_ctrl_lm_head_model(self):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCamelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
pass
@slow
def lowercase__ ( self : Any ):
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = CTRLModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
pass
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self : Any ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(lowerCamelCase )
lowercase__ = torch.tensor(
[[11_859, 0, 1_611, 8]], dtype=torch.long, device=lowerCamelCase ) # Legal the president is
lowercase__ = [
11_859,
0,
1_611,
8,
5,
150,
26_449,
2,
19,
348,
469,
3,
2_595,
48,
20_740,
246_533,
246_533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
lowercase__ = model.generate(lowerCamelCase, do_sample=lowerCamelCase )
self.assertListEqual(output_ids[0].tolist(), lowerCamelCase )
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__UpperCAmelCase : Dict = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int):
    """simple docstring"""

    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        # Round to the nearest multiple, then nudge back inside [min_val, max_val]
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
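# A worked example of the helper above, assuming a channels-last numpy image
# (which get_image_size detects) and hypothetical target numbers chosen only
# to show the rounding behavior.
import numpy as np

image = np.zeros((480, 640, 3), dtype=np.uint8)  # height=480, width=640
size = get_resize_output_image_size(image, output_size=(384, 384), keep_aspect_ratio=True, multiple=32)
# 384/480 = 0.8 (height) vs 384/640 = 0.6 (width); |1 - 0.6| > |1 - 0.8|, so the
# height scale 0.8 wins: 0.8 * 480 = 384 and 0.8 * 640 = 512, both multiples of 32.
assert size == (384, 512)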
class DPTImageProcessor(BaseImageProcessor):
    '''simple docstring'''

    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 384, 'width': 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """simple docstring"""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size['height'], size['width']), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of, )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        """simple docstring"""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        """simple docstring"""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        """simple docstring"""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        """simple docstring"""
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    'Make sure that you pass in as many target sizes as the batch dimension of the logits')
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode='bilinear', align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    '''simple docstring'''
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        """simple docstring"""
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader, description='Evaluation', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        """simple docstring"""
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader, description='Prediction', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, 'predict')
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
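# A hedged wiring sketch in the spirit of the run_qa.py examples. Every
# lowercase variable below (model, training_args, datasets, compute_metrics,
# and the post-processing callback) is a placeholder, not part of this file.
def post_processing_function(examples, features, predictions, stage='eval'):
    # Turn start/end logits into answer strings keyed by example id (omitted)
    ...

trainer = QuestionAnsweringTrainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_features,              # tokenized features
    eval_examples=eval_examples,             # raw examples used by post-processing
    post_process_function=post_processing_function,
    compute_metrics=compute_metrics,
)
metrics = trainer.evaluate()                 # runs the loop, then post-processes and scores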
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Tuple = TypeVar("""DatasetType""", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """simple docstring"""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary.")
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """simple docstring"""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary.")
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']")
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.")
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.")
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
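# A short sketch of both helpers on two tiny in-memory datasets, using the
# public package import; the interleaving order shown follows the library's
# documented round-robin behavior when no probabilities are given.
from datasets import Dataset, concatenate_datasets, interleave_datasets

d1 = Dataset.from_dict({'text': ['a', 'b']})
d2 = Dataset.from_dict({'text': ['c', 'd']})

# Concatenation appends rows in order.
assert concatenate_datasets([d1, d2])['text'] == ['a', 'b', 'c', 'd']

# Interleaving alternates one example at a time and stops when the first
# source is exhausted ('first_exhausted', the default stopping strategy).
assert interleave_datasets([d1, d2])['text'] == ['a', 'c', 'b', 'd']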
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: type,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    '''simple docstring'''
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})")
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})")
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception,
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
):
    '''simple docstring'''
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
    _torch_error = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
    _tf_error = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
    _jax_error = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    '''simple docstring'''
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    '''simple docstring'''
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'")
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")
def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """simple docstring"""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used internally
        # only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """simple docstring"""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}
    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)
    if priority_queue.is_empty():
        return dist, parent
    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node
    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
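# A tiny end-to-end run of the structures above; the name prims_algo follows
# the rewrite of the unnamed top-level function.
graph = GraphUndirectedWeighted()
graph.add_edge('a', 'b', 3)
graph.add_edge('b', 'c', 10)
graph.add_edge('a', 'c', 15)

dist, parent = prims_algo(graph)
# The minimum spanning tree keeps a-b (3) and b-c (10); the 15-weight edge is skipped.
assert parent == {'a': None, 'b': 'a', 'c': 'b'}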
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def carrier_concentration(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
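# A quick sanity check of the solver above: the single zero argument selects
# which quantity to compute (values are hypothetical, chosen only to exercise
# the electron-concentration branch).
name, value = carrier_concentration(conductivity=25, electron_conc=0, mobility=1200)
assert name == 'electron_conc'
# n = sigma / (mu * q) = 25 / (1200 * 1.6021e-19) ~ 1.3e17
assert value == 25 / (1200 * 1.6021e-19)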
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"
    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut
        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}')
        super().__init__(**kwargs)
    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
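# A quick check of the derived properties with the 24 kHz defaults above.
config = EncodecConfig()
# hop length = 8 * 5 * 4 * 2 = 320 samples, so frame_rate = ceil(24000 / 320) = 75
assert config.frame_rate == 75
# highest bandwidth 24.0 kbps -> int(1000 * 24.0 // (75 * 10)) = 32 quantizers
assert config.num_quantizers == 32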
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    # Project Euler 30: numbers equal to the sum of the fifth powers of their digits
    return sum(
        number
        for number in range(1_000, 1_000_000)
        if number == digits_fifth_powers_sum(number))
if __name__ == "__main__":
print(solution())
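# Spot check: 4150 is one of the numbers counted by solution(), since
# 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0 = 4150.
assert digits_fifth_powers_sum(4150) == 4150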
"""simple docstring"""
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)
    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, len(tensor[:sequence_length]) - 1 :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, len(tensor[:sequence_length]) - 1 :] = tensor[:sequence_length]
    return out_tensor.tolist()
def __SCREAMING_SNAKE_CASE ( A_ ):
lowerCAmelCase__ : str = ord(A_ )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 1_23 and cp <= 1_26):
return True
lowerCAmelCase__ : Union[str, Any] = unicodedata.category(A_ )
if cat.startswith('''P''' ):
return True
return False
@dataclass
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
lowercase__ = 42
lowercase__ = True
lowercase__ = None
lowercase__ = None
lowercase__ = -100
lowercase__ = "pt"
def __lowerCAmelCase ( self : Tuple ,lowercase_ : Union[str, Any] ):
import torch
lowerCAmelCase__ : Tuple = '''label''' if '''label''' in features[0].keys() else '''labels'''
lowerCAmelCase__ : List[Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
lowerCAmelCase__ : Any = self.tokenizer.pad(
lowercase_ ,padding=self.padding ,max_length=self.max_length ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors='''pt''' if labels is None else None ,)
if labels is None:
return batch
lowerCAmelCase__ : List[str] = torch.tensor(batch['''entity_ids'''] ).shape[1]
lowerCAmelCase__ : int = self.tokenizer.padding_side
if padding_side == "right":
lowerCAmelCase__ : List[str] = [
list(lowercase_ ) + [self.label_pad_token_id] * (sequence_length - len(lowercase_ )) for label in labels
]
else:
lowerCAmelCase__ : List[Any] = [
[self.label_pad_token_id] * (sequence_length - len(lowercase_ )) + list(lowercase_ ) for label in labels
]
lowerCAmelCase__ : str = [feature['''ner_tags'''] for feature in features]
lowerCAmelCase__ : Tuple = padding_tensor(lowercase_ ,-1 ,lowercase_ ,lowercase_ )
lowerCAmelCase__ : List[Any] = [feature['''original_entity_spans'''] for feature in features]
lowerCAmelCase__ : int = padding_tensor(lowercase_ ,(-1, -1) ,lowercase_ ,lowercase_ )
lowerCAmelCase__ : Optional[int] = {k: torch.tensor(lowercase_ ,dtype=torch.intaa ) for k, v in batch.items()}
return batch
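# --- Usage sketch (made-up inputs). padding_tensor pads each variable-length
# sequence to sequence_length with padding_value, on the requested side:
#
#   padding_tensor([[1, 2], [3]], -1, "right", 4)
#   # -> [[1, 2, -1, -1], [3, -1, -1, -1]]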
| 357
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 74
| 0
|
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
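# --- Usage sketch (hypothetical paths/shapes, not part of the original file).
# Build a 100-row dummy Arrow dataset with one string column and one
# fixed-shape float sequence column, then read it back as a datasets.Dataset:
#
#   features = datasets.Features(
#       {"text": datasets.Value("string"),
#        "vec": datasets.Sequence(datasets.Value("float32"))}
#   )
#   ds = generate_example_dataset(
#       "/tmp/dummy.arrow", features, num_examples=100, seq_shapes={"vec": (8,)}
#   )
#   elapsed = get_duration(lambda: list(ds))()  # seconds for one full read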
| 298
|
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    """Place one queen per row recursively, pruning on column/diagonal collisions."""
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply that we learned previously. First we check that in the current board
        # (possible_board) there are not other same value because if there is it means
        # that there are a collision in vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify if the results of this two formulas not exist in their variables
        # respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any or these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    """Collect and print every distinct n-queens placement."""
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
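# Worked example of the diagonal formulas above: on a 4x4 board a queen at
# (row=1, col=3) registers right-diagonal 1 - 3 = -2 and left-diagonal
# 1 + 3 = 4. A later candidate at (row=2, col=2) computes 2 + 2 = 4, which
# is already in diagonal_left_collisions, so it is pruned without scanning
# the board.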
| 12
| 0
|
"""simple docstring"""
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    '''Convert ["--key", "value", ...] pairs into a {key: value} dict.'''
    return {key.lstrip('-'): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        '''HuggingFace Datasets CLI tool''', usage='''datasets-cli <command> [<args>]''', allow_abbrev=False
    )
    commands_parser = parser.add_subparsers(help='''datasets-cli command helpers''')
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, '''func'''):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
| 359
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__snake_case : str ={
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Tuple =['ConditionalDetrFeatureExtractor']
__snake_case : Union[str, Any] =['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : List[str] =[
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
__snake_case : str =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 94
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowercase_ ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : List[str] = ShapEImgaImgPipeline
UpperCAmelCase : Dict = ['''image''']
UpperCAmelCase : List[Any] = ['''image''']
UpperCAmelCase : Optional[Any] = [
'''num_images_per_prompt''',
'''num_inference_steps''',
'''generator''',
'''latents''',
'''guidance_scale''',
'''frame_size''',
'''output_type''',
'''return_dict''',
]
UpperCAmelCase : Union[str, Any] = False
@property
def lowerCAmelCase_ ( self : List[str] ):
return 32
@property
def lowerCAmelCase_ ( self : str ):
return 32
@property
def lowerCAmelCase_ ( self : Tuple ):
return self.time_input_dim * 4
@property
def lowerCAmelCase_ ( self : Optional[int] ):
return 8
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
_A = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
_A = CLIPVisionModel(_UpperCAmelCase )
return model
@property
def lowerCAmelCase_ ( self : Optional[int] ):
_A = CLIPImageProcessor(
crop_size=224 , do_center_crop=_UpperCAmelCase , do_normalize=_UpperCAmelCase , do_resize=_UpperCAmelCase , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=224 , )
return image_processor
@property
def lowerCAmelCase_ ( self : Union[str, Any] ):
torch.manual_seed(0 )
_A = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
_A = PriorTransformer(**_UpperCAmelCase )
return model
@property
def lowerCAmelCase_ ( self : str ):
torch.manual_seed(0 )
_A = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
_A = ShapERenderer(**_UpperCAmelCase )
return model
def lowerCAmelCase_ ( self : List[str] ):
_A = self.dummy_prior
_A = self.dummy_image_encoder
_A = self.dummy_image_processor
_A = self.dummy_renderer
_A = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=_UpperCAmelCase , clip_sample=_UpperCAmelCase , clip_sample_range=1.0 , )
_A = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str]=0 ):
_A = floats_tensor((1, 3, 64, 64) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
if str(_UpperCAmelCase ).startswith('mps' ):
_A = torch.manual_seed(_UpperCAmelCase )
else:
_A = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
_A = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def lowerCAmelCase_ ( self : Optional[int] ):
_A = 'cpu'
_A = self.get_dummy_components()
_A = self.pipeline_class(**_UpperCAmelCase )
_A = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_A = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
_A = output.images[0]
_A = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_A = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase_ ( self : Any ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCAmelCase_ ( self : Tuple ):
_A = torch_device == 'cpu'
_A = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , )
def lowerCAmelCase_ ( self : int ):
_A = self.get_dummy_components()
_A = self.pipeline_class(**_UpperCAmelCase )
_A = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_A = 1
_A = 2
_A = self.get_dummy_inputs(_UpperCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
_A = batch_size * [inputs[key]]
_A = pipe(**_UpperCAmelCase , num_images_per_prompt=_UpperCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase_ ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase_ ( self : str ):
_A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
_A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
_A = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
_A = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_A = torch.Generator(device=_UpperCAmelCase ).manual_seed(0 )
_A = pipe(
_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 315
|
"""simple docstring"""
import numpy
class TwoHiddenLayerNeuralNetwork:
    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        # Input values provided for training the model.
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f'''Iteration {iteration} Loss: {loss}''')

    def predict(self, input_arr: numpy.ndarray) -> int:
        # Input values for which the output (0 or 1) is to be predicted.
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    return (value) * (1 - (value))


def example() -> int:
    # Input values.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output
    )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))
if __name__ == "__main__":
example()
| 315
| 1
|
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    # Greedy fractional knapsack: sort items by value/weight ratio, fill
    # capacity w whole items first, then take a fraction of the next item.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
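# Worked example with the classic instance: values [60, 100, 120],
# weights [10, 20, 30], capacity w=50, n=3. Sorted by value/weight the
# ratios are 6, 5, 4 (already in order); prefix weights acc = [10, 30, 60],
# so bisect gives k=2: the first two items fit whole (value 160) and the
# remaining 50 - 30 = 20 capacity takes 20 * 120 / 30 = 80 of the third.
#
#   frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3)  # -> 240.0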
| 158
|
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual)+'E'+str(n+1)for n,dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    """Return the order-th derivative of func at position via dual numbers."""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
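# Worked example: f(y) = y**2 * y**4 = y**6, so f''(y) = 30 * y**4 and the
# print above outputs 30 * 9**4 = 30 * 6561 = 196830. Lower orders work the
# same way through the dual coefficients:
#
#   differentiate(lambda x: x**2, 3, 1)  # -> 6, since d(x^2)/dx = 2x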
| 158
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class LevitConfig(PretrainedConfig):
    model_type = '''levit'''

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 74
|
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r'''^(?P<major>\d+)''' r'''\.(?P<minor>\d+)''' r'''\.(?P<patch>\d+)$''')


@total_ordering
@dataclass
class Version:
    """Dataset version MAJOR.MINOR.PATCH."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return F'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(F'{other} (type {type(other)}) cannot be compared to version.')

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from the version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.')
    return tuple(int(v) for v in [res.group('major'), res.group('minor'), res.group('patch')])


def _version_tuple_to_str(version_tuple):
    """Version tuple to str."""
    return ".".join(str(v) for v in version_tuple)
| 74
| 1
|
"""simple docstring"""
def is_palindrome(head):
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head):
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
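# --- Usage sketch (ListNode is a hypothetical helper; the functions above
# only assume nodes with `.val` and `.next`):
#
#   class ListNode:
#       def __init__(self, val):
#           self.val, self.next = val, None
#
#   a, b, c = ListNode(1), ListNode(2), ListNode(1)
#   a.next, b.next = b, c
#   is_palindrome_stack(a)  # True for 1 -> 2 -> 1 (non-destructive)
#   is_palindrome_dict(a)   # True (non-destructive)
#   is_palindrome(a)        # True, but reverses the second half in place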
| 353
|
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'huggingface/autoformer-tourism-monthly': 'https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json',
}
class AutoformerConfig(PretrainedConfig):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length=None,
        context_length=None,
        distribution_output="student_t",
        loss="nll",
        input_size=1,
        lags_sequence=[1, 2, 3, 4, 5, 6, 7],
        scaling=True,
        num_time_features=0,
        num_dynamic_real_features=0,
        num_static_categorical_features=0,
        num_static_real_features=0,
        cardinality=None,
        embedding_dimension=None,
        d_model=64,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        encoder_layers=2,
        decoder_layers=2,
        encoder_ffn_dim=32,
        decoder_ffn_dim=32,
        activation_function="gelu",
        dropout=0.1,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=100,
        init_std=0.02,
        use_cache=True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length=10,
        moving_average=25,
        autocorrelation_factor=3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 189
| 0
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self , __lowercase , __lowercase=13 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=False , __lowercase=False , __lowercase=False , __lowercase=2 , __lowercase=99 , __lowercase=0 , __lowercase=32 , __lowercase=5 , __lowercase=4 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=512 , __lowercase=2 , __lowercase=0.02 , __lowercase=2 , __lowercase=4 , __lowercase="last" , __lowercase=True , __lowercase=None , __lowercase=0 , ) -> List[Any]:
__UpperCamelCase :str = parent
__UpperCamelCase :Union[str, Any] = batch_size
__UpperCamelCase :Any = seq_length
__UpperCamelCase :str = is_training
__UpperCamelCase :Any = use_input_lengths
__UpperCamelCase :int = use_token_type_ids
__UpperCamelCase :Dict = use_labels
__UpperCamelCase :str = gelu_activation
__UpperCamelCase :Any = sinusoidal_embeddings
__UpperCamelCase :Dict = causal
__UpperCamelCase :Optional[int] = asm
__UpperCamelCase :Optional[int] = n_langs
__UpperCamelCase :List[str] = vocab_size
__UpperCamelCase :int = n_special
__UpperCamelCase :List[Any] = hidden_size
__UpperCamelCase :int = num_hidden_layers
__UpperCamelCase :List[Any] = num_attention_heads
__UpperCamelCase :int = hidden_dropout_prob
__UpperCamelCase :Dict = attention_probs_dropout_prob
__UpperCamelCase :Optional[Any] = max_position_embeddings
__UpperCamelCase :List[str] = type_sequence_label_size
__UpperCamelCase :Dict = initializer_range
__UpperCamelCase :Any = num_labels
__UpperCamelCase :Tuple = num_choices
__UpperCamelCase :Any = summary_type
__UpperCamelCase :Dict = use_proj
__UpperCamelCase :Union[str, Any] = scope
__UpperCamelCase :List[str] = bos_token_id
def UpperCamelCase__ ( self) -> Union[str, Any]:
__UpperCamelCase :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCamelCase :int = random_attention_mask([self.batch_size, self.seq_length])
__UpperCamelCase :int = None
if self.use_input_lengths:
__UpperCamelCase :List[str] = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase :Optional[Any] = None
if self.use_token_type_ids:
__UpperCamelCase :Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
__UpperCamelCase :Tuple = None
__UpperCamelCase :Optional[int] = None
__UpperCamelCase :Any = None
if self.use_labels:
__UpperCamelCase :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCamelCase :Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCamelCase :Union[str, Any] = ids_tensor([self.batch_size] , 2).float()
__UpperCamelCase :Optional[int] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCamelCase :List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase__ ( self) -> Dict:
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Any:
__UpperCamelCase :str = XLMModel(config=A_)
model.to(A_)
model.eval()
__UpperCamelCase :Optional[int] = model(A_ , lengths=A_ , langs=A_)
__UpperCamelCase :List[str] = model(A_ , langs=A_)
__UpperCamelCase :List[Any] = model(A_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Dict:
__UpperCamelCase :int = XLMWithLMHeadModel(A_)
model.to(A_)
model.eval()
__UpperCamelCase :Optional[Any] = model(A_ , token_type_ids=A_ , labels=A_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> int:
__UpperCamelCase :Tuple = XLMForQuestionAnsweringSimple(A_)
model.to(A_)
model.eval()
__UpperCamelCase :Tuple = model(A_)
__UpperCamelCase :Any = model(A_ , start_positions=A_ , end_positions=A_)
__UpperCamelCase :str = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> List[Any]:
__UpperCamelCase :Dict = XLMForQuestionAnswering(A_)
model.to(A_)
model.eval()
__UpperCamelCase :Optional[Any] = model(A_)
__UpperCamelCase :int = model(
A_ , start_positions=A_ , end_positions=A_ , cls_index=A_ , is_impossible=A_ , p_mask=A_ , )
__UpperCamelCase :List[Any] = model(
A_ , start_positions=A_ , end_positions=A_ , cls_index=A_ , is_impossible=A_ , )
((__UpperCamelCase ) , ) :Union[str, Any] = result_with_labels.to_tuple()
__UpperCamelCase :Dict = model(A_ , start_positions=A_ , end_positions=A_)
((__UpperCamelCase ) , ) :Union[str, Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Optional[int]:
__UpperCamelCase :Dict = XLMForSequenceClassification(A_)
model.to(A_)
model.eval()
__UpperCamelCase :List[str] = model(A_)
__UpperCamelCase :List[Any] = model(A_ , labels=A_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> List[str]:
__UpperCamelCase :Optional[int] = self.num_labels
__UpperCamelCase :Optional[int] = XLMForTokenClassification(A_)
model.to(A_)
model.eval()
__UpperCamelCase :Any = model(A_ , attention_mask=A_ , labels=A_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> List[str]:
__UpperCamelCase :Dict = self.num_choices
__UpperCamelCase :Optional[int] = XLMForMultipleChoice(config=A_)
model.to(A_)
model.eval()
__UpperCamelCase :str = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__UpperCamelCase :Optional[Any] = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__UpperCamelCase :Any = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
__UpperCamelCase :Tuple = model(
A_ , attention_mask=A_ , token_type_ids=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCamelCase__ ( self) -> int:
__UpperCamelCase :List[Any] = self.prepare_config_and_inputs()
(
(
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) , (
__UpperCamelCase
) ,
) :List[Any] = config_and_inputs
__UpperCamelCase :List[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class lowerCamelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
a__ : Union[str, Any] = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
a__ : str = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
a__ : Optional[int] = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase) -> Any:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase=False) -> int:
__UpperCamelCase :Any = super()._prepare_for_class(A_ , A_ , return_labels=A_)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__UpperCamelCase :List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A_)
__UpperCamelCase :List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=A_)
return inputs_dict
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :Optional[int] = XLMModelTester(self)
__UpperCamelCase :List[str] = ConfigTester(self , config_class=A_ , emb_dim=37)
def UpperCamelCase__ ( self) -> str:
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*A_)
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*A_)
def UpperCamelCase__ ( self) -> Tuple:
__UpperCamelCase :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*A_)
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*A_)
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*A_)
def UpperCamelCase__ ( self) -> Optional[Any]:
__UpperCamelCase :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*A_)
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*A_)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=False , __lowercase=1) -> List[Any]:
self.assertIsInstance(A_ , A_)
self.assertListEqual(
[isinstance(A_ , A_) for iter_attentions in attentions] , [True] * len(A_))
self.assertEqual(len(A_) , (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(A_):
# adds PAD dummy token
__UpperCamelCase :str = min_length + idx + 1
__UpperCamelCase :str = min_length + idx + 1
__UpperCamelCase :Dict = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(A_))
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase=False , __lowercase=1) -> Tuple:
self.assertIsInstance(A_ , A_)
self.assertListEqual(
[isinstance(A_ , A_) for iter_hidden_states in hidden_states] , [True] * len(A_) , )
self.assertEqual(len(A_) , (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(A_):
# adds PAD dummy token
__UpperCamelCase :str = min_length + idx + 1
__UpperCamelCase :List[str] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(A_) , )
pass
@slow
def UpperCamelCase__ ( self) -> Optional[Any]:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase :Dict = XLMModel.from_pretrained(A_)
self.assertIsNotNone(A_)
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ ( self) -> str:
__UpperCamelCase :Any = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''')
model.to(A_)
__UpperCamelCase :int = torch.tensor([[14, 447]] , dtype=torch.long , device=A_) # the president
__UpperCamelCase :Union[str, Any] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__UpperCamelCase :List[Any] = model.generate(A_ , do_sample=A_)
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , A_)
| 43
|
"""simple docstring"""
class CircularQueue:
    """Fixed-capacity FIFO queue on a list, with wrap-around front/rear indices."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception('QUEUE IS FULL')
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception('UNDERFLOW')
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
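# --- Usage sketch (hypothetical calls). Indices wrap modulo n, so a
# capacity-2 queue is reused without ever shifting elements:
#
#   q = CircularQueue(2)
#   q.enqueue("a").enqueue("b")  # enqueue returns self, so calls chain
#   len(q)          # 2
#   q.dequeue()     # "a"; front advances to (front + 1) % n
#   q.enqueue("c")  # rear wraps back to slot 0
#   q.first()       # "b"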
| 74
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
_lowerCAmelCase : List[str] = {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/config.json''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/config.json''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'''
),
}
class XLMRobertaConfig(PretrainedConfig):
    model_type = 'xlm-roberta'

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 340
|
"""simple docstring"""
import logging
from transformers.configuration_utils import PretrainedConfig
_lowerCAmelCase : Optional[Any] = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    """Replicates `~transformers.BertConfig` with additional pruning/masking parameters."""

    model_type = 'masked_bert'

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
| 340
| 1
|
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class FFT:
    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A and B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        # Fold the coefficient table in half at every step
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # Multiply the DFTs of A and B, then inverse-transform to get A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        # Unpack enumerate() as (i, coef) so terms print as coefficient*x^index.
        a = "A = " + " + ".join(
            f'''{coef}*x^{i}''' for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f'''{coef}*x^{i}''' for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f'''{coef}*x^{i}''' for i, coef in enumerate(self.product)
        )
        return f'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
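

# Usage sketch: multiplying A(x) = 1 + 2x + 3x^2 by B(x) = 4 + 5x should give
# 4 + 13x + 22x^2 + 15x^3, up to floating-point rounding in the inverse DFT
# (coefficients are printed as complex numbers with a negligible imaginary part).
if __name__ == "__main__":
    print(FFT(poly_a=[1, 2, 3], poly_b=[4, 5]))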
from pathlib import PurePosixPath
from typing import Optional

import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo

from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url


class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path: str, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path: str, detail: bool = False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
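

# Usage sketch (assumes this module is imported as part of the `datasets`
# package and that network access is available; "squad" is only an example id).
# `HfApi.dataset_info` returns the `DatasetInfo` object the constructor expects:
#
#     from huggingface_hub import HfApi
#
#     fs = HfFileSystem(repo_info=HfApi().dataset_info("squad"))
#     print(fs.ls(""))  # top-level entries of the repo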
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
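

# Usage sketch (the public import path below assumes a `transformers` release
# that ships ViT MSN; the model here is randomly initialised, not pretrained):
#
#     from transformers import ViTMSNConfig, ViTMSNModel
#
#     config = ViTMSNConfig(image_size=224, patch_size=16)
#     model = ViTMSNModel(config)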
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowerCamelCase_ : Optional[int] = None
lowerCamelCase_ : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase_ : str = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase_ : int = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
lowerCamelCase_ : Optional[Any] = {
"""google/rembert""": 2_5_6,
}
lowerCamelCase_ : Optional[Any] = """▁"""
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = RemBertTokenizer
def __init__( self , __A=None , __A=None , __A=True , __A=True , __A=False , __A="[CLS]" , __A="[SEP]" , __A="<unk>" , __A="[SEP]" , __A="<pad>" , __A="[CLS]" , __A="[MASK]" , **__A , ) -> Dict:
# Mask token behave like a normal word, i.e. include the space before it
a =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
__A , tokenizer_file=__A , do_lower_case=__A , remove_space=__A , keep_accents=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , **__A , )
a =do_lower_case
a =remove_space
a =keep_accents
a =vocab_file
a =False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> List[int]:
a =[self.sep_token_id]
a =[self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , __A , __A = None , __A = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__A )) + [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1]
def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> List[int]:
a =[self.sep_token_id]
a =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self , __A , __A = None ) -> Tuple[str]:
if not os.path.isdir(__A ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(__A ) )
return
a =os.path.join(
__A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file , __A )
return (out_vocab_file,)
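

# Usage sketch (assumes a `transformers` install that ships RemBERT and network
# access to download the hosted tokenizer files):
#
#     from transformers import RemBertTokenizerFast
#
#     tokenizer = RemBertTokenizerFast.from_pretrained("google/rembert")
#     encoding = tokenizer("Hello world")
#     # encoding["input_ids"] starts with the [CLS] id and ends with the [SEP] id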
import os
from pathlib import Path
from unittest.mock import patch

import pytest
import zstandard as zstd

from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
    OfflineModeIsEnabled,
    cached_path,
    fsspec_get,
    fsspec_head,
    ftp_get,
    ftp_head,
    get_from_cache,
    http_get,
    http_head,
)


FILE_CONTENT = """\
    Text data.
    Second line of data."""

FILE_PATH = "file"


@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH


@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content


@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)

    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected


def test_cached_path_local(text_file):
    # absolute path
    text_file = str(Path(text_file).resolve())
    assert cached_path(text_file) == text_file
    # relative path
    text_file = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_path = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_path) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
"""AudioFolder: a folder-based dataset builder for audio classification data."""
from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder


logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")


AUDIO_EXTENSIONS = [
    ".aiff",
    ".au",
    ".avr",
    ".caf",
    ".flac",
    ".htk",
    ".svx",
    ".mat4",
    ".mat5",
    ".mpc2k",
    ".ogg",
    ".paf",
    ".pvf",
    ".raw",
    ".rf64",
    ".sd2",
    ".sds",
    ".ircam",
    ".voc",
    ".w64",
    ".wav",
    ".nist",
    ".wavex",
    ".wve",
    ".xi",
    ".mp3",
    ".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
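

# Usage sketch: the builder is normally reached through `load_dataset` rather
# than instantiated directly; the directory layout and path are illustrative:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("audiofolder", data_dir="/path/to/folder")
#     # /path/to/folder/dog/xxx.wav and /path/to/folder/cat/yyy.wav
#     # yield examples labelled "dog" and "cat" respectively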
from datasets.utils.patching import _PatchedModuleObj, patch_submodule

from . import _test_patching


def test_patch_submodule():
    import os as original_os
    from os import path as original_path
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join

    mock = "__test_patch_submodule_mock__"
    with patch_submodule(_test_patching, "os.path.join", mock):
        # Every way to access os.path.join must be patched, and the rest must stay untouched

        # check os.path.join
        assert isinstance(_test_patching.os, _PatchedModuleObj)
        assert isinstance(_test_patching.os.path, _PatchedModuleObj)
        assert _test_patching.os.path.join is mock

        # check path.join
        assert isinstance(_test_patching.path, _PatchedModuleObj)
        assert _test_patching.path.join is mock

        # check join
        assert _test_patching.join is mock

        # check that the other attributes are untouched
        assert _test_patching.os.rename is original_rename
        assert _test_patching.path.dirname is original_dirname
        assert _test_patching.os.path.dirname is original_dirname

        # Even renamed modules or objects must be patched

        # check renamed_os.path.join
        assert isinstance(_test_patching.renamed_os, _PatchedModuleObj)
        assert isinstance(_test_patching.renamed_os.path, _PatchedModuleObj)
        assert _test_patching.renamed_os.path.join is mock

        # check renamed_path.join
        assert isinstance(_test_patching.renamed_path, _PatchedModuleObj)
        assert _test_patching.renamed_path.join is mock

        # check renamed_join
        assert _test_patching.renamed_join is mock

        # check that the other attributes are untouched
        assert _test_patching.renamed_os.rename is original_rename
        assert _test_patching.renamed_path.dirname is original_dirname
        assert _test_patching.renamed_os.path.dirname is original_dirname

    # check that everything is back to normal when the patch is over
    assert _test_patching.os is original_os
    assert _test_patching.path is original_path
    assert _test_patching.join is original_join

    assert _test_patching.renamed_os is original_os
    assert _test_patching.renamed_path is original_path
    assert _test_patching.renamed_join is original_join


def test_patch_submodule_builtin():
    assert _test_patching.open is open

    mock = "__test_patch_submodule_builtin_mock__"
    # _test_patching has "open" in its globals
    assert _test_patching.open is open
    with patch_submodule(_test_patching, "open", mock):
        assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
    assert _test_patching.open is open


def test_patch_submodule_missing():
    # pandas.read_csv is not present in _test_patching
    mock = "__test_patch_submodule_missing_mock__"
    with patch_submodule(_test_patching, "pandas.read_csv", mock):
        pass


def test_patch_submodule_missing_builtin():
    # builtins should always be mocked even if they're not in the globals,
    # in case they're loaded at one point
    mock = "__test_patch_submodule_missing_builtin_mock__"
    # _test_patching doesn't have "len" in its globals
    assert getattr(_test_patching, "len", None) is None
    with patch_submodule(_test_patching, "len", mock):
        assert _test_patching.len is mock
    assert _test_patching.len is len


def test_patch_submodule_start_and_stop():
    mock = "__test_patch_submodule_start_and_stop_mock__"
    patch = patch_submodule(_test_patching, "open", mock)
    assert _test_patching.open is open
    patch.start()
    assert _test_patching.open is mock
    patch.stop()
    assert _test_patching.open is open


def test_patch_submodule_successive():
    from os import rename as original_rename
    from os.path import dirname as original_dirname
    from os.path import join as original_join

    mock_join = "__test_patch_submodule_successive_join__"
    mock_dirname = "__test_patch_submodule_successive_dirname__"
    mock_rename = "__test_patch_submodule_successive_rename__"
    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename

    with patch_submodule(_test_patching, "os.path.join", mock_join):
        with patch_submodule(_test_patching, "os.rename", mock_rename):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    # try another order
    with patch_submodule(_test_patching, "os.rename", mock_rename):
        with patch_submodule(_test_patching, "os.path.join", mock_join):
            with patch_submodule(_test_patching, "os.path.dirname", mock_dirname):
                assert _test_patching.os.path.join is mock_join
                assert _test_patching.os.path.dirname is mock_dirname
                assert _test_patching.os.rename is mock_rename

    assert _test_patching.os.path.join is original_join
    assert _test_patching.os.path.dirname is original_dirname
    assert _test_patching.os.rename is original_rename


def test_patch_submodule_doesnt_exist():
    mock = "__test_patch_submodule_doesnt_exist_mock__"
    with patch_submodule(_test_patching, "__module_that_doesn_exist__.__attribute_that_doesn_exist__", mock):
        pass
    with patch_submodule(_test_patching, "os.__attribute_that_doesn_exist__", mock):
        pass
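

# Minimal sketch of the API under test; `some_module` is a hypothetical module
# that uses `os.path.join` internally:
#
#     from datasets.utils.patching import patch_submodule
#     import some_module
#
#     with patch_submodule(some_module, "os.path.join", lambda *args: "patched"):
#         ...  # every alias of os.path.join inside some_module now yields "patched"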
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    is_torch_available,
    is_torch_tensor,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_torch_available():
    import torch

if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)


class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [
                self.resize(
                    image=image,
                    size=size,
                    keep_aspect_ratio=keep_aspect_ratio,
                    ensure_multiple_of=ensure_multiple_of,
                    resample=resample,
                )
                for image in images
            ]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
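

# Usage sketch (the public import path below assumes a `transformers` release
# that ships DPT; the image is synthetic):
#
#     import numpy as np
#     from transformers import DPTImageProcessor
#
#     processor = DPTImageProcessor(size={"height": 384, "width": 384})
#     image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#     inputs = processor(images=image, return_tensors="np")
#     print(inputs["pixel_values"].shape)  # (1, 3, 384, 384)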