| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86–54.5k) | int64 (0–371) | string (lengths 87–49.2k) | int64 (0–349) | int64 (0–1) |
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
def __init__( self : List[str] , lowerCAmelCase_ : pyspark.sql.DataFrame , lowerCAmelCase_ : Optional[NamedSplit] = None , lowerCAmelCase_ : Optional[Features] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : str = None , lowerCAmelCase_ : bool = False , lowerCAmelCase_ : str = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : str = "arrow" , **lowerCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(
split=lowerCAmelCase_ , features=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , keep_in_memory=lowerCAmelCase_ , streaming=lowerCAmelCase_ , **lowerCAmelCase_ , )
lowercase_ = load_from_cache_file
lowercase_ = file_format
lowercase_ = Spark(
df=lowerCAmelCase_ , features=lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , working_dir=lowerCAmelCase_ , **lowerCAmelCase_ , )
def _UpperCAmelCase ( self : int):
"""simple docstring"""
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split)
lowercase_ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=lowerCAmelCase_ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split)
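A minimal usage sketch for the reader above; the local SparkSession, the toy schema, and the cache path are illustrative assumptions, not part of the snippet itself:

```python
# Hypothetical usage of SparkDatasetReader (schema and paths are placeholders).
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("hello", 0), ("world", 1)], schema="text string, label int")

dataset = SparkDatasetReader(df, streaming=False, cache_dir="/tmp/hf_cache").read()
print(dataset[0])
```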
[code_codestyle: 136]
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "gpt_neox"
def __init__( self : Union[str, Any] , lowerCAmelCase_ : str=5_0_4_3_2 , lowerCAmelCase_ : List[Any]=6_1_4_4 , lowerCAmelCase_ : str=4_4 , lowerCAmelCase_ : Tuple=6_4 , lowerCAmelCase_ : Optional[int]=2_4_5_7_6 , lowerCAmelCase_ : List[Any]="gelu" , lowerCAmelCase_ : Any=0.25 , lowerCAmelCase_ : int=1_0_0_0_0 , lowerCAmelCase_ : str=0.0 , lowerCAmelCase_ : Dict=0.0 , lowerCAmelCase_ : Optional[Any]=0.1 , lowerCAmelCase_ : Union[str, Any]=2_0_4_8 , lowerCAmelCase_ : Optional[int]=0.02 , lowerCAmelCase_ : List[Any]=1E-5 , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : List[Any]=0 , lowerCAmelCase_ : Optional[Any]=2 , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : int=None , **lowerCAmelCase_ : str , ):
"""simple docstring"""
super().__init__(bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , **lowerCAmelCase_)
lowercase_ = vocab_size
lowercase_ = max_position_embeddings
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = rotary_pct
lowercase_ = rotary_emb_base
lowercase_ = attention_dropout
lowercase_ = hidden_dropout
lowercase_ = classifier_dropout
lowercase_ = initializer_range
lowercase_ = layer_norm_eps
lowercase_ = use_cache
lowercase_ = tie_word_embeddings
lowercase_ = use_parallel_residual
lowercase_ = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"""The hidden size is not divisble by the number of attention heads! Make sure to update them!""")
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowerCAmelCase_) or len(self.rope_scaling) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
F'''got {self.rope_scaling}''')
lowercase_ = self.rope_scaling.get("""type""" , lowerCAmelCase_)
lowercase_ = self.rope_scaling.get("""factor""" , lowerCAmelCase_)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''')
if rope_scaling_factor is None or not isinstance(lowerCAmelCase_ , lowerCAmelCase_) or rope_scaling_factor <= 1.0:
raise ValueError(F'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''')
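A quick sketch of how the validation above is exercised; the scaling values are just examples:

```python
# Valid: linear RoPE scaling with factor > 1 passes _rope_scaling_validation.
config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
assert config.max_position_embeddings == 2048

# Invalid: a factor <= 1.0 raises ValueError.
try:
    GPTNeoXConfig(rope_scaling={"type": "dynamic", "factor": 0.5})
except ValueError as e:
    print(e)
```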
[style_context_codestyle: 136 | label: 1]
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
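A hedged illustration of how a placeholder map like `black_avoid_patterns` is typically applied to doc snippets (the template string below is invented for the example):

```python
# Fill doc templates with the stand-in class names from black_avoid_patterns.
snippet = "model = {model_class}.from_pretrained(checkpoint)"
for placeholder, fake in black_avoid_patterns.items():
    snippet = snippet.replace(placeholder, fake)
print(snippet)  # model = FakeModelClass.from_pretrained(checkpoint)
```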
[code_codestyle: 127]
import torch

from diffusers import StableDiffusionPipeline

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
[style_context_codestyle: 127 | label: 1]
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
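A minimal sketch instantiating the config above; the backbone name and stage indices are illustrative choices:

```python
# Configure a timm ResNet backbone that returns features from stages 1-4.
config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
print(config.use_timm_backbone, config.out_indices)  # True (1, 2, 3, 4)
```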
[code_codestyle: 0]
"""Spectrogram diffusion pipeline (note sequences to audio)."""
import math
from typing import Any, Callable, List, Optional, Tuple, Union

import numpy as np
import torch

from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor

if is_onnx_available():
    from ..onnx_utils import OnnxRuntimeModel

from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256


class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: T5FilmDecoder,
        scheduler: DDPMScheduler,
        melgan: "OnnxRuntimeModel",
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features from the MelGAN range to the network range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert `scale_features`: map network outputs back to the MelGAN range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
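The two scaling helpers above are linear maps between the MelGAN feature range [log(1e-5), 4.0] and the network range [-1, 1]. A standalone sketch verifying that the round trip is lossless:

```python
# Round-trip check of the rescaling used by scale_features / scale_to_features.
import math

import torch

min_value, max_value = math.log(1e-5), 4.0
features = torch.tensor([min_value, 0.0, max_value])

zero_one = (features - min_value) / (max_value - min_value)
scaled = zero_one * 2.0 - 1.0  # to [-1, 1]
restored = (scaled + 1.0) / 2.0 * (max_value - min_value) + min_value

assert torch.allclose(features, restored)
```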
[style_context_codestyle: 321 | label: 0]
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_pix2struct": [
        "PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Pix2StructConfig",
        "Pix2StructTextConfig",
        "Pix2StructVisionConfig",
    ],
    "processing_pix2struct": ["Pix2StructProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_pix2struct"] = ["Pix2StructImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_pix2struct"] = [
        "PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Pix2StructPreTrainedModel",
        "Pix2StructForConditionalGeneration",
        "Pix2StructVisionModel",
        "Pix2StructTextModel",
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
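The module above defers all submodule imports to first attribute access via `_LazyModule`. A simplified sketch of that idea, not the real class, assuming only the mapping behaviour described by `_import_structure`:

```python
# Simplified lazy-import module: resolve attributes to submodules on demand.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each public attribute to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)
```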
[code_codestyle: 360]
import gc
import random
import unittest

import torch

from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin


@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)


def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
[style_context_codestyle: 92 | label: 0]
from ..utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
)


try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )

try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler

try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
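The repeated try/except blocks above guard imports behind optional backends. A minimal, library-agnostic sketch of the same idiom:

```python
# Optional-dependency guard: probe for a backend once, branch on the result.
try:
    import scipy  # noqa: F401

    HAS_SCIPY = True
except ImportError:
    HAS_SCIPY = False

if HAS_SCIPY:
    print("scipy-backed schedulers would be importable here")
```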
[code_codestyle: 67]
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs


@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
[style_context_codestyle: 67 | label: 1]
"""simple docstring"""
from maths.prime_check import is_prime
def lowerCamelCase__ ( __snake_case ) -> int:
"""simple docstring"""
if not isinstance(__snake_case, __snake_case ):
_UpperCamelCase = F'''Input value of [number={number}] must be an integer'''
raise TypeError(__snake_case )
if is_prime(__snake_case ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
[code_codestyle: 100]
"""simple docstring"""
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def lowerCamelCase__ ( __snake_case ) -> None:
"""simple docstring"""
_UpperCamelCase , _UpperCamelCase = analyze_text(__snake_case )
_UpperCamelCase = list(''' ''' + ascii_lowercase )
# what is our total sum of probabilities.
_UpperCamelCase = sum(single_char_strings.values() )
# one length string
_UpperCamelCase = 0
# for each alpha we go in our dict and if it is in it we calculate entropy
for ch in my_alphas:
if ch in single_char_strings:
_UpperCamelCase = single_char_strings[ch]
_UpperCamelCase = my_str / all_sum
my_fir_sum += prob * math.loga(__snake_case ) # entropy formula.
# print entropy
print(F'''{round(-1 * my_fir_sum ):.1f}''' )
# two len string
_UpperCamelCase = sum(two_char_strings.values() )
_UpperCamelCase = 0
# for each alpha (two in size) calculate entropy.
for cha in my_alphas:
for cha in my_alphas:
_UpperCamelCase = cha + cha
if sequence in two_char_strings:
_UpperCamelCase = two_char_strings[sequence]
_UpperCamelCase = int(__snake_case ) / all_sum
my_sec_sum += prob * math.loga(__snake_case )
# print second entropy
print(F'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(F'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def lowerCamelCase__ ( __snake_case ) -> tuple[dict, dict]:
"""simple docstring"""
_UpperCamelCase = Counter() # type: ignore
_UpperCamelCase = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0, len(__snake_case ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def lowerCamelCase__ ( ) -> Dict:
"""simple docstring"""
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
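The script applies the Shannon entropy formula H = -Σ p·log2(p) to character frequencies. A quick standalone sanity check of that formula:

```python
# A uniform 4-symbol source carries exactly 2 bits of entropy.
import math

probs = [0.25, 0.25, 0.25, 0.25]
entropy = -sum(p * math.log2(p) for p in probs)
assert entropy == 2.0
```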
[style_context_codestyle: 100 | label: 1]
"""Convert an XLNet TensorFlow checkpoint to a PyTorch model."""
import argparse
import os

import torch

from transformers import (
    XLNetConfig,
    XLNetForQuestionAnswering,
    XLNetForSequenceClassification,
    XLNetLMHeadModel,
    load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging


GLUE_TASKS_NUM_LABELS = {
    "cola": 2,
    "mnli": 3,
    "mrpc": 2,
    "sst-2": 2,
    "sts-b": 1,
    "qqp": 2,
    "qnli": 2,
    "rte": 2,
    "wnli": 2,
}

logging.set_verbosity_info()


def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise PyTorch model
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--xlnet_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained XLNet model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--finetuning_task",
        default=None,
        type=str,
        help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
    )
    args = parser.parse_args()
    print(args)

    convert_xlnet_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
    )
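A hypothetical invocation of the converter above; the script filename and all paths are placeholders, only the flags come from the argument parser:

```python
# Example command line (illustrative paths; run from a shell):
# python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path /tmp/xlnet_model.ckpt \
#     --xlnet_config_file /tmp/xlnet_config.json \
#     --pytorch_dump_folder_path /tmp/xlnet_pytorch \
#     --finetuning_task sts-b
```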
[code_codestyle: 95]
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
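A small usage sketch, reusing the `Features`, `Audio`, and `Value` names imported above; the 16 kHz sampling rate is an illustrative choice:

```python
# Align the task template with a dataset whose audio column is 16 kHz.
features = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
task = AutomaticSpeechRecognition().align_with_features(features)
print(task.column_mapping)  # {'audio': 'audio', 'transcription': 'transcription'}
```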
[style_context_codestyle: 38 | label: 0]
import unittest

from datasets import load_dataset

from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class BloomTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    slow_tokenizer_class = None
    rust_tokenizer_class = BloomTokenizerFast
    tokenizer_class = BloomTokenizerFast
    test_rust_tokenizer = True
    test_slow_tokenizer = False
    from_pretrained_vocab_key = "tokenizer_file"
    special_tokens_map = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}

    def setUp(self):
        super().setUp()
        tokenizer = BloomTokenizerFast.from_pretrained("bigscience/tokenizer")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BloomTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def test_encodings_from_sample_data(self):
        tokenizer = self.get_rust_tokenizer()

        INPUT_SENTENCES = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
        TARGET_TOKENS = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]

        computed_tokens = tokenizer.batch_encode_plus(INPUT_SENTENCES)["input_ids"]
        self.assertListEqual(TARGET_TOKENS, computed_tokens)

        decoded_tokens = tokenizer.batch_decode(computed_tokens)
        self.assertListEqual(decoded_tokens, INPUT_SENTENCES)

    def test_padding(self, max_length=6):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # tokenizer_r.pad_token = None # Hotfixing padding = None
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                try:
                    tokenizer_r.encode(s, max_length=max_length)
                    tokenizer_r.encode_plus(s, max_length=max_length)

                    tokenizer_r.batch_encode_plus(s2, max_length=max_length)
                    tokenizer_r.encode(p, max_length=max_length)
                    tokenizer_r.batch_encode_plus(p2, max_length=max_length)
                except ValueError:
                    self.fail("Bloom Tokenizer should be able to deal with padding")

                tokenizer_r.pad_token = None  # Hotfixing padding = None
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )

    def test_encodings_from_xnli_dataset(self):
        tokenizer = self.get_rust_tokenizer()
        ds = load_dataset("xnli", "all_languages", split="test", streaming=True)

        sample_data = next(iter(ds))["premise"]  # pick up one data
        input_text = list(sample_data.values())

        output_tokens = list(map(tokenizer.encode, input_text))
        predicted_text = [tokenizer.decode(x, clean_up_tokenization_spaces=False) for x in output_tokens]
        self.assertListEqual(predicted_text, input_text)

    def test_pretrained_model_lists(self):
        self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1)
        self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
[code_codestyle: 231]
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np


class FilterType(Protocol):
    def process(self, sample: float) -> float:
        """Calculate y[n] for input sample x[n]."""
        return 0.0


def get_bounds(fft_results: np.ndarray, samplerate: int) -> tuple[int | float, int | float]:
    lowest = min([-20, np.min(fft_results[1 : samplerate // 2 - 1])])
    highest = max([20, np.max(fft_results[1 : samplerate // 2 - 1])])
    return lowest, highest


def show_frequency_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the frequency response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    fft_out = np.abs(np.fft.fft(outputs))
    fft_db = 20 * np.log10(fft_out)

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    # Display within reasonable bounds
    bounds = get_bounds(fft_db, samplerate)
    plt.ylim(max([-80, bounds[0]]), min([80, bounds[1]]))
    plt.ylabel("Gain (dB)")

    plt.plot(fft_db)
    plt.show()


def show_phase_response(filter_type: FilterType, samplerate: int) -> None:
    """Show the phase response of a filter."""
    size = 512
    inputs = [1] + [0] * (size - 1)
    outputs = [filter_type.process(item) for item in inputs]

    filler = [0] * (samplerate - size)  # zero-padding
    outputs += filler

    fft_out = np.angle(np.fft.fft(outputs))

    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24, samplerate / 2 - 1)
    plt.xlabel("Frequency (Hz)")
    plt.xscale("log")

    plt.ylim(-2 * pi, 2 * pi)
    plt.ylabel("Phase shift (Radians)")
    plt.plot(np.unwrap(fft_out, -2 * pi))
    plt.show()
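A minimal sketch of a class satisfying the `FilterType` protocol above; the identity filter and the 48 kHz rate are illustrative choices:

```python
# The simplest FilterType: an identity filter, whose impulse response is [1, 0, 0, ...].
class IdentityFilter:
    def process(self, sample: float) -> float:
        return sample


show_frequency_response(IdentityFilter(), samplerate=48000)  # flat 0 dB gain
show_phase_response(IdentityFilter(), samplerate=48000)      # zero phase shift
```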
[style_context_codestyle: 231 | label: 1]
# https://en.wikipedia.org/wiki/Circular_convolution
import doctest
from collections import deque

import numpy as np


class CircularConvolution:
    """Stores two signals and performs their circular convolution."""

    def __init__(self):
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self):
        """Perform the circular convolution of the two signals using the matrix method."""
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]


if __name__ == "__main__":
    doctest.testmod()
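A usage sketch; the expected output follows from the definition y[n] = Σ x1[m]·x2[(n−m) mod 4] applied to the two hard-coded signals, which gives [10, 10, 6, 14]:

```python
# Circular convolution of [2, 1, 2, -1] with [1, 2, 3, 4].
convolver = CircularConvolution()
print(convolver.circular_convolution())  # [10, 10, 6, 14]
```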
[code_codestyle: 127]
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_torch_available():
    import torch

if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


logger = logging.get_logger(__name__)


@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f"{deprecated_arg} is deprecated. Please use --no_{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)

    torchscript: bool = field(default=False, metadata={"help": "Trace the models using torchscript"})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={"help": "Print Xla/PyTorch tpu metrics"})
    fp16_opt_level: str = field(
        default="O1",
        metadata={
            "help": (
                "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
                "See details at https://nvidia.github.io/apex/amp.html"
            )
        },
    )

    @cached_property
    def _setup_devices(self) -> Tuple["torch.device", int]:
        requires_backends(self, ["torch"])
        logger.info("PyTorch: setting up devices")
        if not self.cuda:
            device = torch.device("cpu")
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        else:
            device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
            n_gpu = torch.cuda.device_count()
        return device, n_gpu

    @property
    def is_tpu(self):
        return is_torch_tpu_available() and self.tpu

    @property
    def device_idx(self) -> int:
        requires_backends(self, ["torch"])
        # TODO(PVP): currently only single GPU is supported
        return torch.cuda.current_device()

    @property
    def device(self) -> "torch.device":
        requires_backends(self, ["torch"])
        return self._setup_devices[0]

    @property
    def n_gpu(self):
        requires_backends(self, ["torch"])
        return self._setup_devices[1]

    @property
    def is_gpu(self):
        return self.n_gpu > 0
[style_context_codestyle: 127 | label: 1]
"""simple docstring"""
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class lowerCAmelCase__ :
def __init__( self : List[Any] , _lowerCamelCase : List[str] , ):
_snake_case = parent
_snake_case = 13
_snake_case = 7
_snake_case = 30
_snake_case = self.seq_length + self.mem_len
_snake_case = 15
_snake_case = True
_snake_case = True
_snake_case = 99
_snake_case = [10, 50, 80]
_snake_case = 32
_snake_case = 32
_snake_case = 4
_snake_case = 8
_snake_case = 128
_snake_case = 2
_snake_case = 2
_snake_case = None
_snake_case = 1
_snake_case = 0
_snake_case = 3
_snake_case = self.vocab_size - 1
_snake_case = 0.0_1
def lowercase ( self : List[Any] ):
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def lowercase ( self : Optional[Any] ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def lowercase ( self : int , _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : List[Any] ):
_snake_case = TFTransfoXLModel(_lowerCamelCase )
_snake_case , _snake_case = model(_lowerCamelCase ).to_tuple()
_snake_case = {'''input_ids''': input_ids_a, '''mems''': mems_a}
_snake_case , _snake_case = model(_lowerCamelCase ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowercase ( self : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] ):
_snake_case = TFTransfoXLLMHeadModel(_lowerCamelCase )
_snake_case , _snake_case = model(_lowerCamelCase ).to_tuple()
_snake_case = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
_snake_case , _snake_case = model(_lowerCamelCase ).to_tuple()
_snake_case , _snake_case = model([input_ids_a, mems_a] ).to_tuple()
_snake_case = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
_snake_case , _snake_case = model(_lowerCamelCase ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def lowercase ( self : Optional[int] , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : str ):
_snake_case = TFTransfoXLForSequenceClassification(_lowerCamelCase )
_snake_case = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self : Dict ):
_snake_case = self.prepare_config_and_inputs()
((_snake_case) , (_snake_case) , (_snake_case) , (_snake_case)) = config_and_inputs
_snake_case = {'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
__a = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
__a = () if is_tf_available() else ()
__a = (
{
"""feature-extraction""": TFTransfoXLModel,
"""text-classification""": TFTransfoXLForSequenceClassification,
"""text-generation""": TFTransfoXLLMHeadModel,
"""zero-shot""": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
__a = False
__a = False
__a = False
__a = False
def lowercase ( self : Optional[int] , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def lowercase ( self : Any ):
_snake_case = TFTransfoXLModelTester(self )
_snake_case = ConfigTester(self , config_class=_lowerCamelCase , d_embed=37 )
def lowercase ( self : Any ):
self.config_tester.run_common_tests()
def lowercase ( self : str ):
self.model_tester.set_seed()
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*_lowerCamelCase )
def lowercase ( self : int ):
self.model_tester.set_seed()
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*_lowerCamelCase )
def lowercase ( self : int ):
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*_lowerCamelCase )
def lowercase ( self : Tuple ):
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
_snake_case = model_class(_lowerCamelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
_snake_case = model.get_output_embeddings()
assert isinstance(_lowerCamelCase , tf.keras.layers.Layer )
_snake_case = model.get_bias()
assert name is None
else:
_snake_case = model.get_output_embeddings()
assert x is None
_snake_case = model.get_bias()
assert name is None
def lowercase ( self : Tuple ):
pass
@slow
def lowercase ( self : str ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = TFTransfoXLModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def lowercase ( self : int ):
pass
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def lowercase ( self : List[str] ):
        model = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
_snake_case = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        _snake_case = model.generate(_snake_case , max_length=200 , do_sample=False )
        self.assertListEqual(_snake_case[0].numpy().tolist() , expected_output_ids )
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester :
    def __init__( self : str , parent : List[Any] , batch_size : Tuple=13 , image_size : int=32 , num_channels : List[str]=3 , num_stages : List[str]=4 , hidden_sizes : Optional[int]=[10, 20, 30, 40] , depths : Dict=[2, 2, 3, 2] , is_training : Dict=True , use_labels : Tuple=True , intermediate_size : Tuple=37 , hidden_act : Optional[Any]="gelu" , num_labels : Optional[Any]=10 , initializer_range : Any=0.0_2 , out_features : Optional[Any]=["stage2", "stage3", "stage4"] , out_indices : Any=[2, 3, 4] , scope : Any=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs ( self : Dict ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config ( self : str ):
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def create_and_check_model ( self : Tuple , config : Tuple , pixel_values : int , labels : List[str] ):
        model = ConvNextVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification ( self : Dict , config : List[str] , pixel_values : Tuple , labels : Union[str, Any] ):
        model = ConvNextVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone ( self : Optional[Any] , config : Tuple , pixel_values : List[Any] , labels : Tuple ):
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
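        # Note (added for clarity): with image_size=32, the stem downsamples by 4 and each later stage
        # by 2, so the first returned map (stage2) is 32 // 8 = 4 pixels per side, matching the
        # [.., 4, 4] check above, while stage4 alone is 32 // 32 = 1, matching the [.., 1, 1] check.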
    def prepare_config_and_inputs_for_common ( self : str ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
    def prepare_config_and_inputs_with_labels ( self : int ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values, '''labels''': labels}
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            ConvNextVaModel,
            ConvNextVaForImageClassification,
            ConvNextVaBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp ( self : str ):
        self.model_tester = ConvNextVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextVaConfig , has_text_modality=False , hidden_size=37 )
def lowercase ( self : List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties ( self : Dict ):
        return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
def lowercase ( self : Dict ):
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
def lowercase ( self : int ):
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
def lowercase ( self : int ):
pass
    def lowercase ( self : Union[str, Any] ):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def lowercase ( self : Dict ):
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def lowercase ( self : Optional[Any] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
def lowercase ( self : Optional[Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def lowercase ( self : Optional[int] ):
        def check_hidden_states_output (inputs_dict : Any , config : Union[str, Any] , model_class : Optional[int] ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['''output_hidden_states'''] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def lowercase ( self : List[str] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def lowercase ( self : str ):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img ( ) -> Optional[Any]:
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
    def default_image_processor ( self : List[Any] ):
        return AutoImageProcessor.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ) if is_vision_available() else None
@slow
    def lowercase ( self : Optional[Any] ):
        model = ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(torch_device )
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image , return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.9_9_9_6, 0.1_9_6_6, -0.4_3_8_6] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type (model_name_or_path ):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths (metric_fn , prediction , ground_truths ):
    return max(metric_fn(prediction , gt ) for gt in ground_truths )
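# Illustrative usage (hypothetical values): metric_max_over_ground_truths(exact_match_score, "Paris",
# ["Paris", "Paris, France"]) scores the prediction against every gold answer and keeps the best match.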
def get_scores (args , preds_path , gold_data_path ):
    hypos = [line.strip() for line in open(preds_path , '''r''' ).readlines()]
    answers = []
    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path , sep='''\t''' , header=None )
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list )
            answers.append(ground_truths )
    else:
        references = [line.strip() for line in open(gold_data_path , '''r''' ).readlines()]
        answers = [[reference] for reference in references]
    fa = em = total = 0
    for prediction, ground_truths in zip(hypos , answers ):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score , prediction , ground_truths )
        fa += metric_max_over_ground_truths(fa_score , prediction , ground_truths )
    em = 100.0 * em / total
    fa = 100.0 * fa / total
    logger.info(F'''F1: {fa:.2f}''' )
    logger.info(F'''EM: {em:.2f}''' )
def get_precision_at_k (args , preds_path , gold_data_path ):
    k = args.k
    hypos = [line.strip() for line in open(preds_path , '''r''' ).readlines()]
    references = [line.strip() for line in open(gold_data_path , '''r''' ).readlines()]
    em = total = 0
    for hypo, reference in zip(hypos , references ):
        hypo_provenance = set(hypo.split('''\t''' )[:k] )
        ref_provenance = set(reference.split('''\t''' ) )
        total += 1
        em += len(hypo_provenance & ref_provenance ) / k
    em = 100.0 * em / total
    logger.info(F'''Precision@{k}: {em: .2f}''' )
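# Worked example (hypothetical lines): with k=2, a prediction line "doc_a\tdoc_b" scored against a
# reference line "doc_b\tdoc_c" shares one title, so it contributes |{doc_b}| / 2 = 0.5 to the total.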
def evaluate_batch_retrieval (args , rag_model , questions ):
    def strip_title (title ):
        if title.startswith('''"''' ):
            title = title[1:]
        if title.endswith('''"''' ):
            title = title[:-1]
        return title
    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions , return_tensors='''pt''' , padding=True , truncation=True , )['''input_ids'''].to(args.device )
    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids )
    question_enc_pool_output = question_enc_outputs[0]
    result = rag_model.retriever(
        retriever_input_ids , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='''pt''' , )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title ) for title in docs['''title''']]
        provenance_strings.append('''\t'''.join(provenance ) )
    return provenance_strings
def evaluate_batch_eae (args , rag_model , questions ):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions , return_tensors='''pt''' , padding=True , truncation=True )
        input_ids = inputs_dict.input_ids.to(args.device )
        attention_mask = inputs_dict.attention_mask.to(args.device )
        outputs = rag_model.generate( # rag_model overwrites generate
            input_ids , attention_mask=attention_mask , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=False , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs , skip_special_tokens=True )
        if args.print_predictions:
            for q, a in zip(questions , answers ):
                logger.info('''Q: {} - A: {}'''.format(q , a ) )
        return answers
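# Illustrative call (assuming `args` came from get_args() and `model` is a loaded RAG model):
#   answers = evaluate_batch_eae(args, model, ["who wrote hamlet?"])
# returns one decoded answer string per input question.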
def get_args ():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''--model_type''' , choices=['''rag_sequence''', '''rag_token''', '''bart'''] , type=str , help=(
            '''RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'''
            ''' model_name_or_path'''
        ) , )
    parser.add_argument(
        '''--index_name''' , default=None , choices=['''exact''', '''compressed''', '''legacy'''] , type=str , help='''RAG model retriever type''' , )
    parser.add_argument(
        '''--index_path''' , default=None , type=str , help='''Path to the retrieval index''' , )
    parser.add_argument('''--n_docs''' , default=5 , type=int , help='''Number of retrieved docs''' )
    parser.add_argument(
        '''--model_name_or_path''' , default=None , type=str , required=True , help='''Path to pretrained checkpoints or model identifier from huggingface.co/models''' , )
    parser.add_argument(
        '''--eval_mode''' , choices=['''e2e''', '''retrieval'''] , default='''e2e''' , type=str , help=(
            '''Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'''
            ''' precision@k.'''
        ) , )
    parser.add_argument('''--k''' , default=1 , type=int , help='''k for the precision@k calculation''' )
    parser.add_argument(
        '''--evaluation_set''' , default=None , type=str , required=True , help='''Path to a file containing evaluation samples''' , )
    parser.add_argument(
        '''--gold_data_path''' , default=None , type=str , required=True , help='''Path to a tab-separated file with gold samples''' , )
    parser.add_argument(
        '''--gold_data_mode''' , default='''qa''' , type=str , choices=['''qa''', '''ans'''] , help=(
            '''Format of the gold data file'''
            '''qa - a single line in the following format: question [tab] answer_list'''
            '''ans - a single line of the gold file contains the expected answer string'''
        ) , )
    parser.add_argument(
        '''--predictions_path''' , type=str , default='''predictions.txt''' , help='''Name of the predictions file, to be stored in the checkpoints directory''' , )
    parser.add_argument(
        '''--eval_all_checkpoints''' , action='''store_true''' , help='''Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number''' , )
    parser.add_argument(
        '''--eval_batch_size''' , default=8 , type=int , help='''Batch size per GPU/CPU for evaluation.''' , )
    parser.add_argument(
        '''--recalculate''' , help='''Recalculate predictions even if the prediction file exists''' , action='''store_true''' , )
    parser.add_argument(
        '''--num_beams''' , default=4 , type=int , help='''Number of beams to be used when generating answers''' , )
    parser.add_argument('''--min_length''' , default=1 , type=int , help='''Min length of the generated answers''' )
    parser.add_argument('''--max_length''' , default=50 , type=int , help='''Max length of the generated answers''' )
    parser.add_argument(
        '''--print_predictions''' , action='''store_true''' , help='''If True, prints predictions while evaluating.''' , )
    parser.add_argument(
        '''--print_docs''' , action='''store_true''' , help='''If True, prints docs retried while generating.''' , )
    args = parser.parse_args()
    args.device = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
    return args
def main (args ):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path )
        assert args.model_type is not None
    if args.model_type.startswith('''rag''' ):
        model_class = RagTokenForGeneration if args.model_type == '''rag_token''' else RagSequenceForGeneration
        model_kwargs['''n_docs'''] = args.n_docs
        if args.index_name is not None:
            model_kwargs['''index_name'''] = args.index_name
        if args.index_path is not None:
            model_kwargs['''index_path'''] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info('''Evaluate the following checkpoints: %s''' , checkpoints )
    score_fn = get_scores if args.eval_mode == '''e2e''' else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_eae if args.eval_mode == '''e2e''' else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path ) and (not args.recalculate):
            logger.info('''Calculating metrics based on an existing predictions file: {}'''.format(args.predictions_path ) )
            score_fn(args , args.predictions_path , args.gold_data_path )
            continue
        logger.info('''***** Running evaluation for {} *****'''.format(checkpoint ) )
        logger.info(''' Batch size = %d''' , args.eval_batch_size )
        logger.info(''' Predictions will be stored under {}'''.format(args.predictions_path ) )
        if args.model_type.startswith('''rag''' ):
            retriever = RagRetriever.from_pretrained(checkpoint , **model_kwargs )
            model = model_class.from_pretrained(checkpoint , retriever=retriever , **model_kwargs )
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint , **model_kwargs )
        model.to(args.device )
        with open(args.evaluation_set , '''r''' ) as eval_file, open(args.predictions_path , '''w''' ) as preds_file:
            questions = []
            for line in tqdm(eval_file ):
                questions.append(line.strip() )
                if len(questions ) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args , model , questions )
                    preds_file.write('''\n'''.join(answers ) + '''\n''' )
                    preds_file.flush()
                    questions = []
            if len(questions ) > 0:
                answers = evaluate_batch_fn(args , model , questions )
                preds_file.write('''\n'''.join(answers ) )
                preds_file.flush()
            score_fn(args , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
    args = get_args()
    main(args)
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"""The `image_to_image.py` script is outdated. Please use directly `from diffusers import"""
""" StableDiffusionImg2ImgPipeline` instead."""
)
'''simple docstring'''
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level ( ):
    '''simple docstring'''
    env_level_str = os.getenv("""DATASETS_VERBOSITY""" , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                F'''Unknown option DATASETS_VERBOSITY={env_level_str}, '''
                F'''has to be one of: { ', '.join(log_levels.keys() ) }''' )
    return _default_log_level
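# e.g. running with DATASETS_VERBOSITY=debug in the environment maps to logging.DEBUG via the
# log_levels table above; unknown values fall back to the WARNING default.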
def _get_library_name ( ):
    '''simple docstring'''
    return __name__.split(""".""" )[0]
def _get_library_root_logger ( ):
    '''simple docstring'''
    return logging.getLogger(_get_library_name() )
def _configure_library_root_logger ( ):
    '''simple docstring'''
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level() )
def _reset_library_root_logger ( ):
    '''simple docstring'''
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET )
def get_logger ( name : Optional[str] = None ):
    '''simple docstring'''
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name )
def get_verbosity ( ):
    '''simple docstring'''
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity ( verbosity : int ):
    '''simple docstring'''
    _get_library_root_logger().setLevel(verbosity )
def set_verbosity_info ( ):
    '''simple docstring'''
    return set_verbosity(INFO )
def set_verbosity_warning ( ):
    '''simple docstring'''
    return set_verbosity(WARNING )
def set_verbosity_debug ( ):
    '''simple docstring'''
    return set_verbosity(DEBUG )
def set_verbosity_error ( ):
    '''simple docstring'''
    return set_verbosity(ERROR )
def disable_propagation ( ):
    '''simple docstring'''
    _get_library_root_logger().propagate = False
def enable_propagation ( ):
    '''simple docstring'''
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm :
    def __init__( self : Optional[Any] , *args : int , **kwargs : Union[str, Any] ): # pylint: disable=unused-argument
        """simple docstring"""
        self._iterator = args[0] if args else None
    def __iter__( self : int ):
        """simple docstring"""
        return iter(self._iterator )
    def __getattr__( self : Any , _UpperCAmelCase : List[Any] ):
        """simple docstring"""
        def empty_fn (*args : str , **kwargs : List[str] ): # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__( self : List[Any] ):
        """simple docstring"""
        return self
    def __exit__( self : List[Any] , type_ : Any , value : List[Any] , traceback : Any ):
        """simple docstring"""
        return
_tqdm_active = True
class _tqdm_cls :
    def __call__( self : str , *args : Optional[Any] , disable : Optional[Any]=False , **kwargs : List[str] ):
        """simple docstring"""
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock ( self : int , *args : List[str] , **kwargs : int ):
        """simple docstring"""
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock ( self : Optional[Any] ):
        """simple docstring"""
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled ( ):
    '''simple docstring'''
    global _tqdm_active
    return bool(_tqdm_active )
def enable_progress_bar ( ):
    '''simple docstring'''
    global _tqdm_active
    _tqdm_active = True
def disable_progress_bar ( ):
    '''simple docstring'''
    global _tqdm_active
    _tqdm_active = False
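# Typical usage (illustrative): disable_progress_bar() makes the `tqdm` alias above return the
# no-op EmptyTqdm wrapper, and enable_progress_bar() restores real tqdm progress bars.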
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCAmelCase_ ( PretrainedConfig ):
    '''simple docstring'''
    model_type = """cvt"""
    def __init__( self : List[Any] , num_channels : List[Any]=3 , patch_sizes : int=[7, 3, 3] , patch_stride : Optional[Any]=[4, 2, 2] , patch_padding : List[Any]=[2, 1, 1] , embed_dim : Optional[int]=[64, 1_92, 3_84] , num_heads : Any=[1, 3, 6] , depth : Tuple=[1, 2, 10] , mlp_ratio : Union[str, Any]=[4.0, 4.0, 4.0] , attention_drop_rate : Optional[int]=[0.0, 0.0, 0.0] , drop_rate : Dict=[0.0, 0.0, 0.0] , drop_path_rate : Dict=[0.0, 0.0, 0.1] , qkv_bias : Optional[int]=[True, True, True] , cls_token : Dict=[False, False, True] , qkv_projection_method : Dict=["dw_bn", "dw_bn", "dw_bn"] , kernel_qkv : int=[3, 3, 3] , padding_kv : Optional[int]=[1, 1, 1] , stride_kv : List[Any]=[2, 2, 2] , padding_q : Union[str, Any]=[1, 1, 1] , stride_q : str=[1, 1, 1] , initializer_range : Dict=0.02 , layer_norm_eps : Dict=1E-12 , **kwargs : Any , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
"""simple docstring"""
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class SCREAMING_SNAKE_CASE_ ( TestCase ):
    """simple docstring"""
    def _create_example_records ( self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]
    def _create_example_dict ( self):
        data = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
        return Dataset.from_dict(data)
    def snake_case_ ( self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names , ["""col_1""", """col_2"""])
        for i, r in enumerate(dset):
            self.assertDictEqual(r , example_records[i])
    def snake_case_ ( self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info , dset_from_dict.info)
    def snake_case_ ( self): # checks what happens with missing columns
        list_with_missing_columns = [{"""col_1""": 1}, {"""col_2""": """x"""}]
        dset = Dataset.from_list(list_with_missing_columns)
        self.assertDictEqual(dset[0] , {"""col_1""": 1})
        self.assertDictEqual(dset[1] , {"""col_1""": None}) # NB: first record is used for columns
    def snake_case_ ( self): # checks if the type can be inferred from the second record
        list_of_dicts = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
        dset = Dataset.from_list(list_of_dicts)
        self.assertEqual(dset.info.features["""col_1"""] , Sequence(Value("""int64""")))
    def snake_case_ ( self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset) , 0)
        self.assertListEqual(dset.column_names , [])
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
__magic_name__ = {
"salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class BlipaVisionConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''blip_2_vision_model'''
    def __init__( self , hidden_size=1_4_0_8 , intermediate_size=6_1_4_4 , num_hidden_layers=3_9 , num_attention_heads=1_6 , image_size=2_2_4 , patch_size=1_4 , hidden_act="gelu" , layer_norm_eps=0.0_00_01 , attention_dropout=0.0 , initializer_range=1E-10 , qkv_bias=True , **kwargs , ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
@classmethod
    def from_pretrained ( cls , pretrained_model_name_or_path , **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)
        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("""model_type""") == "blip-2":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict , **kwargs)
class BlipaQFormerConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''blip_2_qformer'''
    def __init__( self , vocab_size=3_0_5_2_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1_4_0_8 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
@classmethod
    def from_pretrained ( cls , pretrained_model_name_or_path , **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs)
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("""model_type""") == "blip-2":
            config_dict = config_dict["""qformer_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict , **kwargs)
class BlipaConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''blip-2'''
    is_composition = True
    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=3_2 , **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the Blip2VisionConfig with default values.""")
        if qformer_config is None:
            qformer_config = {}
            logger.info("""qformer_config is None. Initializing the Blip2QFormerConfig with default values.""")
        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""")
        self.vision_config = BlipaVisionConfig(**vision_config)
        self.qformer_config = BlipaQFormerConfig(**qformer_config)
        text_model_type = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
@classmethod
    def from_vision_qformer_text_configs ( cls , vision_config , qformer_config , text_config , **kwargs , ):
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def to_dict ( self):
        output = copy.deepcopy(self.__dict__)
        output['''vision_config'''] = self.vision_config.to_dict()
        output['''qformer_config'''] = self.qformer_config.to_dict()
        output['''text_config'''] = self.text_config.to_dict()
        output['''model_type'''] = self.__class__.model_type
        return output
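    # Note (added for clarity): a BlipaConfig is a composition of three sub-configs; to_dict() above
    # inlines each of them so the serialized JSON is self-contained.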
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv('''sample_data.csv''', header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
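    # Windowing note (added for clarity): each sample pairs a `look_back`-step input window with the
    # `forward_days`-step window that immediately follows it, so train_x[i] has shape (look_back, 1)
    # and train_y[i] has shape (forward_days, 1).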
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])
    model = Sequential()
    model.add(LSTM(1_28, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(1_28, 1)))
    model.add(Dense(forward_days))
    model.compile(loss='''mean_squared_error''', optimizer='''adam''')
    history = model.fit(
        x_train, y_train, epochs=1_50, verbose=1, shuffle=True, batch_size=4
    )
    result = model.predict(x_test)
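    # `result` holds one `forward_days`-step forecast per test window, i.e. shape
    # (len(test_x), forward_days), since the final Dense layer has `forward_days` units.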
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)
def save_model ( model : Tuple , dirpath : List[Any] ):
    # save results
    if os.path.exists(dirpath ):
        if os.path.exists(os.path.join(dirpath , "config.json" ) ) and os.path.isfile(
            os.path.join(dirpath , "config.json" ) ):
            os.remove(os.path.join(dirpath , "config.json" ) )
        if os.path.exists(os.path.join(dirpath , "pytorch_model.bin" ) ) and os.path.isfile(
            os.path.join(dirpath , "pytorch_model.bin" ) ):
            os.remove(os.path.join(dirpath , "pytorch_model.bin" ) )
    else:
        os.makedirs(dirpath )
    model.save_pretrained(dirpath )
def entropy ( p : Optional[Any] , unlogit : Optional[Any]=False ):
    exponent = 2
    if unlogit:
        p = torch.pow(p , exponent )
    plogp = p * torch.log(p )
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1 )
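# Illustrative example: for a uniform distribution p = torch.full((4,), 0.25), entropy(p) returns
# log(4) ≈ 1.386 nats; with unlogit=True the input is first squared before the same computation.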
def print_ad_tensor ( tensor : Optional[int] ):
    logger.info("lv, h >\t" + "\t".join(F"{x + 1}" for x in range(len(tensor ) ) ) )
    for row in range(len(tensor ) ):
        if tensor.dtype != torch.long:
            logger.info(F"layer {row + 1}:\t" + "\t".join(F"{x:.5f}" for x in tensor[row].cpu().data ) )
        else:
            logger.info(F"layer {row + 1}:\t" + "\t".join(F"{x:d}" for x in tensor[row].cpu().data ) )
def compute_heads_importance ( args : Optional[Any] , model : Optional[Any] , eval_dataloader : Dict , compute_entropy : Union[str, Any]=True , compute_importance : int=True , head_mask : Dict=None , actually_pruned : Union[str, Any]=False ):
    n_layers , n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers , n_heads ).to(args.device )
    attn_entropy = torch.zeros(n_layers , n_heads ).to(args.device )
    if head_mask is None:
        head_mask = torch.ones(n_layers , n_heads ).to(args.device )
    head_mask.requires_grad_(requires_grad=True )
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None
    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
        inputs = tuple(t.to(args.device ) for t in inputs )
        (input_ids,) = inputs
        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids , labels=input_ids , head_mask=head_mask )
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss , _ , all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        ) # Loss and logits are the first, attention the last
        loss.backward() # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions ):
                masked_entropy = entropy(attn.detach() , True )
                attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids ).float().detach().sum().data
    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance , exponent ).sum(-1 ) , 1 / exponent )
        head_importance /= norm_by_layer.unsqueeze(-1 ) + 1e-20
    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies" )
        print_ad_tensor(attn_entropy )
    if compute_importance:
        logger.info("Head importance scores" )
        print_ad_tensor(head_importance )
    logger.info("Head ranked by importance scores" )
    head_ranks = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
    head_ranks[head_importance.view(-1 ).sort(descending=True )[1]] = torch.arange(
        head_importance.numel() , device=args.device )
    head_ranks = head_ranks.view_as(head_importance )
    print_ad_tensor(head_ranks )
    return attn_entropy, head_importance, total_loss
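# Note (added for clarity): because the forward pass runs with gradients enabled and head_mask
# requires grad, head_mask.grad.abs() is a first-order estimate of how much the loss changes when a
# head is removed -- that accumulated magnitude is what `head_importance` stores per (layer, head).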
def mask_heads ( args : Any , model : List[str] , eval_dataloader : Tuple ):
    _ , head_importance , loss = compute_heads_importance(args , model , eval_dataloader , compute_entropy=False )
    original_score = 1 / loss # instead of downsteam score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f" , original_score , original_score * args.masking_threshold )
    new_head_mask = torch.ones_like(head_importance )
    num_to_mask = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach() # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf" )
        current_heads_to_mask = head_importance.view(-1 ).sort()[1]
        if len(current_heads_to_mask ) <= num_to_mask:
            print("BREAK BY num_to_mask" )
            break
        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
        new_head_mask = new_head_mask.view(-1 )
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask )
        new_head_mask = new_head_mask.clone().detach()
        print_ad_tensor(new_head_mask )
        # Compute metric and head importance again
        _ , head_importance , loss = compute_heads_importance(
            args , model , eval_dataloader , compute_entropy=False , head_mask=new_head_mask )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)" , current_score , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
    logger.info("Final head mask" )
    print_ad_tensor(head_mask )
    np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
    return head_mask
def prune_heads ( args : Tuple , model : Optional[int] , eval_dataloader : Union[str, Any] , head_mask : Any ):
    before_time = datetime.now()
    _ , _ , loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=head_mask )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters() )
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask ) )
    }
    for k, v in heads_to_prune.items():
        if isinstance(v , int ):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune )
    pruned_num_params = sum(p.numel() for p in model.parameters() )
    before_time = datetime.now()
    _ , _ , loss = compute_heads_importance(
        args , model , eval_dataloader , compute_entropy=False , compute_importance=False , head_mask=None , actually_pruned=True , )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , original_num_params , pruned_num_params , pruned_num_params / original_num_params * 100 , )
    logger.info("Pruning: score with masking: %f score with pruning: %f" , score_masking , score_pruning )
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
    save_model(model , args.output_dir )
def main ( ):
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir" , default=None , type=str , required=True , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
    parser.add_argument(
        "--model_name_or_path" , default=None , type=str , required=True , help="Path to pretrained model or model identifier from huggingface.co/models" , )
    parser.add_argument(
        "--output_dir" , default=None , type=str , required=True , help="The output directory where the model predictions and checkpoints will be written." , )
    # Other parameters
    parser.add_argument(
        "--config_name" , default="" , type=str , help="Pretrained config name or path if not the same as model_name_or_path" , )
    parser.add_argument(
        "--tokenizer_name" , default="" , type=str , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
    parser.add_argument(
        "--cache_dir" , default=None , type=str , help="Where do you want to store the pre-trained models downloaded from s3" , )
    parser.add_argument(
        "--data_subset" , type=int , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
    parser.add_argument(
        "--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
    parser.add_argument(
        "--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
    parser.add_argument(
        "--dont_normalize_importance_by_layer" , action="store_true" , help="Don't normalize importance score by layers" )
    parser.add_argument(
        "--dont_normalize_global_importance" , action="store_true" , help="Don't normalize all importance scores between 0 and 1" , )
    parser.add_argument(
        "--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
    parser.add_argument(
        "--masking_threshold" , default=0.9 , type=float , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
    parser.add_argument(
        "--masking_amount" , default=0.1 , type=float , help="Amount to heads to masking at each masking step." )
    parser.add_argument("--metric_name" , default="acc" , type=str , help="Metric to use for head masking." )
    parser.add_argument(
        "--max_seq_length" , default=128 , type=int , help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ) , )
    parser.add_argument("--batch_size" , default=1 , type=int , help="Batch size." )
    parser.add_argument("--seed" , type=int , default=42 )
    parser.add_argument("--local_rank" , type=int , default=-1 , help="local_rank for distributed training on gpus" )
    parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
    parser.add_argument("--server_ip" , type=str , default="" , help="Can be used for distant debugging." )
    parser.add_argument("--server_port" , type=str , default="" , help="Can be used for distant debugging." )
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach" )
        ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=True )
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank )
        args.device = torch.device("cuda" , args.local_rank )
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
    model = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
    # Distributed and parallel training
    model.to(args.device )
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=True )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model )
    # Print/save training arguments
    os.makedirs(args.output_dir , exist_ok=True )
    torch.save(args , os.path.join(args.output_dir , "run_args.bin" ) )
    logger.info("Training/evaluation parameters %s" , args )
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir , dtype=np.intaa ),
        ] )
    train_tensor_dataset = (torch.from_numpy(numpy_data ),)
    train_data = TensorDataset(*train_tensor_dataset )
    train_sampler = RandomSampler(train_data )
    eval_dataloader = DataLoader(train_data , sampler=train_sampler , batch_size=args.batch_size )
    # Compute head entropy and importance score
    compute_heads_importance(args , model , eval_dataloader )
    # Try head masking (set heads to zero until the score goes under a threshole)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args , model , eval_dataloader )
        prune_heads(args , model , eval_dataloader , head_mask )
if __name__ == "__main__":
main()
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False
    def setUp ( self ) -> Dict:
        super().setUp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
def __a ( self , **_UpperCamelCase ) -> Any:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __a ( self , **_UpperCamelCase ) -> int:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __a ( self , _UpperCamelCase ) -> List[str]:
lowerCAmelCase_ = "lower newer"
lowerCAmelCase_ = "lower newer"
return input_text, output_text
def __a ( self ) -> List[Any]:
lowerCAmelCase_ = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCAmelCase_ = "lower newer"
lowerCAmelCase_ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
lowerCAmelCase_ = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = tokens + [tokenizer.unk_token]
lowerCAmelCase_ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCamelCase ) , _UpperCamelCase )
@require_ftfy
def __a ( self ) -> Union[str, Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
lowerCAmelCase_ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
lowerCAmelCase_ = tokenizer_s.tokenize(_UpperCamelCase )
lowerCAmelCase_ = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
lowerCAmelCase_ = "xa\u0303y" + " " + "x\xe3y"
lowerCAmelCase_ = tokenizer_s.tokenize(_UpperCamelCase )
lowerCAmelCase_ = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
# Test that the tokenization is identical on unicode of space type
lowerCAmelCase_ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
lowerCAmelCase_ = tokenizer_s.tokenize(_UpperCamelCase )
lowerCAmelCase_ = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
# Test that the tokenization is identical on unicode of line break type
lowerCAmelCase_ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
lowerCAmelCase_ = tokenizer_s.tokenize(_UpperCamelCase )
lowerCAmelCase_ = tokenizer_r.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __a ( self ) -> str:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase_ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
lowerCAmelCase_ = f"""{text_of_1_token} {text_of_1_token}"""
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , )
lowerCAmelCase_ = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_UpperCamelCase ) + 1, len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
lowerCAmelCase_ = f""" {text}"""
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
_UpperCamelCase , use_fast=_UpperCamelCase , )
lowerCAmelCase_ = tokenizer_r(_UpperCamelCase , return_offsets_mapping=_UpperCamelCase , add_special_tokens=_UpperCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_UpperCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_UpperCamelCase ) + 1, 1 + len(_UpperCamelCase ) + 1 + len(_UpperCamelCase )) , )
def __a ( self ) -> Optional[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(_UpperCamelCase ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def __a ( self ) -> str:
super().test_tokenization_python_rust_equals()
def __a ( self ) -> Any:
# CLIP always lower cases letters
pass
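# A hedged illustration of the BPE behavior exercised by test_full_tokenizer above
# (kept as comments, since running it needs the files written in setUp): with the
# tiny vocab and only the merges "l o", "lo w</w>" and "e r</w>", "lower" cannot
# reach the "low</w>"/"lowest</w>" vocabulary entries, so it splits as
#
#   tokenizer = CLIPTokenizer(vocab_file, merges_file, unk_token="<unk>")
#   tokenizer.tokenize("lower newer")  # -> ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]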
| 231
|
import requests
_A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def lowerCamelCase__ ( __lowerCAmelCase : str ):
"""simple docstring"""
lowerCAmelCase_ = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page["articles"] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
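# A hedged sketch of the payload shape this script relies on (NewsAPI v1; the
# exact fields depend on the service and may change):
#
#   {"status": "ok", "source": "bbc-news",
#    "articles": [{"title": "...", "description": "...", "url": "..."}, ...]}
#
# Only "articles[*].title" is consumed above.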
| 231
| 1
|
import argparse
import torch
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
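# A hedged invocation sketch (paths are placeholders, not from the original file):
#
#   python convert_wav2vec2_original_s3prl_checkpoint_to_pytorch.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json \
#       --checkpoint_path ./s3prl_checkpoint.ckpt \
#       --model_dump_path ./converted_model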
| 365
|
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
logger = logging.getLogger(__name__)


def parse_args():
    parser = argparse.ArgumentParser(
        description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset."
    )
    parser.add_argument(
        "--dataset_name", type=str, default="wikitext", help="Name of the training. Explore datasets at: hf.co/datasets."
    )
    parser.add_argument(
        "--dataset_config", type=str, default="wikitext-103-raw-v1", help="Configuration name of the dataset."
    )
    parser.add_argument(
        "--tokenizer_name_or_path",
        type=str,
        default="sayakpaul/unigram-tokenizer-wikitext",
        help="Tokenizer identifier. Can be a local filepath or a Hub identifier.",
    )
    parser.add_argument(
        "--shard_size", type=int, default=1000, help="Number of entries to go in a single shard."
    )
    parser.add_argument("--split", type=str, default="train", choices=["train", "test", "validation"])
    parser.add_argument(
        "--limit", default=None, type=int, help="Limit the number of shards (used for debugging)."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
        " sequence length that is a multiple of 8.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default="tf-tpu",
        help="Output directory where the TFRecord shards will be saved. If the"
        " path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
        " shards will be directly saved to a Google Cloud Storage bucket.",
    )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    def fn(examples):
        return tokenizer(examples["text"])

    return fn
def get_serialized_examples(tokenized_data):
    records = []
    for i in range(len(tokenized_data["input_ids"])):
        features = {
            "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data["input_ids"][i])),
            "attention_mask": tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data["attention_mask"][i])
            ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
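# A hedged round-trip sketch (not in the original script): records serialized
# above can be parsed back with a matching feature spec. group_texts() below
# fixes every sequence to args.max_length tokens, so FixedLenFeature applies.
#
#   feature_spec = {
#       "input_ids": tf.io.FixedLenFeature([max_length], tf.int64),
#       "attention_mask": tf.io.FixedLenFeature([max_length], tf.int64),
#   }
#   ds = tf.data.TFRecordDataset([filename]).map(
#       lambda rec: tf.io.parse_single_example(rec, feature_spec)
#   )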
def main(args):
    dataset = datasets.load_dataset(args.dataset_name, args.dataset_config, split=args.split)

    if args.limit is not None:
        max_samples = min(len(dataset), args.limit)
        dataset = dataset.select(range(max_samples))
        print(f"Limiting the dataset to {args.limit} entries.")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path)

    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        split_dir = os.path.join(args.output_dir, args.split)
        if not os.path.exists(split_dir):
            os.makedirs(split_dir)
    else:
        split_dir = os.path.join(args.output_dir, args.split)

    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer)
    dataset_tokenized = dataset.map(tokenize_fn, batched=True, num_proc=4, remove_columns=["text"])

    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.

    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0, total_length, args.max_length)]
            for k, t in concatenated_examples.items()
        }
        return result

    grouped_dataset = dataset_tokenized.map(group_texts, batched=True, batch_size=1000, num_proc=4)

    shard_count = 0
    total_records = 0
    for shard in range(0, len(grouped_dataset), args.shard_size):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot["input_ids"])
        filename = os.path.join(split_dir, f"dataset-{shard_count}-{records_containing}.tfrecord")
        serialized_examples = get_serialized_examples(dataset_snapshot)

        with tf.io.TFRecordWriter(filename) as out_file:
            for i in range(len(serialized_examples)):
                example = serialized_examples[i]
                out_file.write(example)
            print("Wrote file {} containing {} records".format(filename, records_containing))

        shard_count += 1
        total_records += records_containing

    with open(f"split-{args.split}-records-count.txt", "w") as f:
        print(f"Total {args.split} records: {total_records}", file=f)
if __name__ == "__main__":
    args = parse_args()
main(args)
| 247
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_roc_bert""": ["""ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoCBertConfig"""],
"""tokenization_roc_bert""": ["""RoCBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roc_bert"] = [
"""ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoCBertForCausalLM""",
"""RoCBertForMaskedLM""",
"""RoCBertForMultipleChoice""",
"""RoCBertForPreTraining""",
"""RoCBertForQuestionAnswering""",
"""RoCBertForSequenceClassification""",
"""RoCBertForTokenClassification""",
"""RoCBertLayer""",
"""RoCBertModel""",
"""RoCBertPreTrainedModel""",
"""load_tf_weights_in_roc_bert""",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 68
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_rag""": ["""RagConfig"""],
"""retrieval_rag""": ["""RagRetriever"""],
"""tokenization_rag""": ["""RagTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
"""RagModel""",
"""RagPreTrainedModel""",
"""RagSequenceForGeneration""",
"""RagTokenForGeneration""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
"""TFRagModel""",
"""TFRagPreTrainedModel""",
"""TFRagSequenceForGeneration""",
"""TFRagTokenForGeneration""",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 40
| 0
|
"""simple docstring"""
def UpperCamelCase_ ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase_ : Optional[int] = [1]
for i in range(2 , lowerCAmelCase__ ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
lowerCAmelCase_ : str = []
lowerCAmelCase_ : int = list(range(lowerCAmelCase__ ) )
# Find permutation
while factorials:
lowerCAmelCase_ : Dict = factorials.pop()
lowerCAmelCase_ : int = divmod(lowerCAmelCase__ , lowerCAmelCase__ )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
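# A quick hand-verified example of the function above (not part of the original
# module): with n=4 the 0-indexed lexicographic permutations of [0, 1, 2, 3]
# begin 0123, 0132, 0213, 0231, 0312, 0321, so k=5 yields [0, 3, 2, 1].
if __name__ == "__main__":
    assert kth_permutation(5, 4) == [0, 3, 2, 1]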
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 289
| 0
|
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True,
        use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MobileBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            embedding_size=self.embedding_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_mobilebert_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mobilebert_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_mobilebert_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_mobilebert_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MobileBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mobilebert_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mobilebert_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_mobilebert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MobileBertModel,
            MobileBertForMaskedLM,
            MobileBertForMultipleChoice,
            MobileBertForNextSentencePrediction,
            MobileBertForPreTraining,
            MobileBertForQuestionAnswering,
            MobileBertForSequenceClassification,
            MobileBertForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileBertModel,
            "fill-mask": MobileBertForMaskedLM,
            "question-answering": MobileBertForQuestionAnswering,
            "text-classification": MobileBertForSequenceClassification,
            "token-classification": MobileBertForTokenClassification,
            "zero-shot": MobileBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = MobileBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mobilebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs)


def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst,
        dtype=torch.long,
        device=torch_device,
    )


TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class MobileBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device)
        input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 9, 512))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526e07, 8.2691656e04, 1.6521838e05],
                    [-5.7541704e-01, 3.9056022e00, 4.4011507e00],
                    [2.6047359e00, 1.5677652e00, -1.7324188e-01],
                ]
            ],
            device=torch_device,
        )

        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE)
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE)

        self.assertTrue(lower_bound and upper_bound)
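# Side note (not in the original test): for same-sign values the ratio check above
# is a pure relative comparison, roughly equivalent to
# torch.allclose(expected_slice, output[..., :3, :3], rtol=TOLERANCE, atol=0.0);
# dividing just makes the 1e0..1e8 spread of MobileBERT activations explicit.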
| 156
|
"""simple docstring"""
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3, 0, -1 ), range(0, 3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
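# A minimal sketch of the slicing convention above (illustrative only, kept as
# comments to avoid side effects at import time): a fused qkv projection stacks
# query, key and value along dim 0, so a (3 * dim, dim) matrix splits into three
# (dim, dim) blocks.
#
#   dim = 4
#   qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
#   q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
#   assert torch.equal(torch.cat([q, k, v]), qkv)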
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight_cross_attn = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-hidden_size:]
    # fmt: on
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
        type=str,
        help="Path to the original state dict (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
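# A hedged invocation sketch (the default checkpoint path above is the author's
# local path; replace it with your own):
#
#   python convert_maskformer_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path ./model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade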
| 61
| 0
|
"""simple docstring"""
def lowerCAmelCase__ ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
_a : Union[str, Any] = word.split()
def justify(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> str:
_a : Optional[int] = max_width - width
_a : Tuple = len(UpperCamelCase__ )
if len(UpperCamelCase__ ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
_a : List[Any] = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
_a : Optional[Any] = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
_a : Any = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(UpperCamelCase__ ):
num_spaces_between_words_list[i] += 1
_a : Any = []
for i in range(UpperCamelCase__ ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * """ """ )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(UpperCamelCase__ )
_a : Union[str, Any] = []
_a : list[str] = []
_a : Dict = 0
for word in words:
if width + len(UpperCamelCase__ ) + len(UpperCamelCase__ ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(UpperCamelCase__ )
width += len(UpperCamelCase__ )
else:
# justify the line and add it to result
answer.append(justify(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) )
# reset new line and new width
_a , _a : List[Any] = [word], len(UpperCamelCase__ )
_a : Dict = max_width - width - len(UpperCamelCase__ )
answer.append(""" """.join(UpperCamelCase__ ) + (remaining_spaces + 1) * """ """ )
return answer
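# A hand-checked example of the function above (not from the original module):
# each returned line is exactly max_width characters wide, and the last line is
# left-justified with trailing padding.
if __name__ == "__main__":
    assert text_justification("This is an example of text justification.", 16) == [
        "This    is    an",
        "example  of text",
        "justification.  ",
    ]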
if __name__ == "__main__":
from doctest import testmod
testmod()
| 324
|
"""simple docstring"""
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)


# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")


# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
('pretraining', 'MODEL_FOR_PRETRAINING_MAPPING_NAMES', 'AutoModelForPreTraining'),
('feature-extraction', 'MODEL_MAPPING_NAMES', 'AutoModel'),
('audio-classification', 'MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForAudioClassification'),
('text-generation', 'MODEL_FOR_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForCausalLM'),
('automatic-speech-recognition', 'MODEL_FOR_CTC_MAPPING_NAMES', 'AutoModelForCTC'),
('image-classification', 'MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForImageClassification'),
('image-segmentation', 'MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES', 'AutoModelForImageSegmentation'),
('fill-mask', 'MODEL_FOR_MASKED_LM_MAPPING_NAMES', 'AutoModelForMaskedLM'),
('object-detection', 'MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES', 'AutoModelForObjectDetection'),
(
'zero-shot-object-detection',
'MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES',
'AutoModelForZeroShotObjectDetection',
),
('question-answering', 'MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES', 'AutoModelForQuestionAnswering'),
('text2text-generation', 'MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES', 'AutoModelForSeq2SeqLM'),
('text-classification', 'MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForSequenceClassification'),
('automatic-speech-recognition', 'MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES', 'AutoModelForSpeechSeq2Seq'),
(
'table-question-answering',
'MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForTableQuestionAnswering',
),
('token-classification', 'MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForTokenClassification'),
('multiple-choice', 'MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES', 'AutoModelForMultipleChoice'),
(
'next-sentence-prediction',
'MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES',
'AutoModelForNextSentencePrediction',
),
(
'audio-frame-classification',
'MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForAudioFrameClassification',
),
('audio-xvector', 'MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES', 'AutoModelForAudioXVector'),
(
'document-question-answering',
'MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForDocumentQuestionAnswering',
),
(
'visual-question-answering',
'MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES',
'AutoModelForVisualQuestionAnswering',
),
('image-to-text', 'MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES', 'AutoModelForVision2Seq'),
(
'zero-shot-image-classification',
'MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES',
'AutoModelForZeroShotImageClassification',
),
('depth-estimation', 'MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES', 'AutoModelForDepthEstimation'),
('video-classification', 'MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES', 'AutoModelForVideoClassification'),
('mask-generation', 'MODEL_FOR_MASK_GENERATION_MAPPING_NAMES', 'AutoModelForMaskGeneration'),
]
def camel_case_split(identifier):
    """Split a camel-cased name such as ``TFBertModel`` into its component words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]
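# For example, camel_case_split("TFBertForMaskedLM") yields
# ["TF", "Bert", "For", "Masked", "LM"]; the backend-detection loop below uses
# this to peel trailing words off a class name until it hits a known prefix.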
def get_frameworks_table():
    """Generate a dataframe flagging, for each model type, which frameworks (PT/TF/Flax) support it."""
    # Map config-name prefixes (e.g. "Bert") to model types.
    config_maping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_maping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Look through all transformers objects (once) and record which backends support each model.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to pick the right preprocessing class for each model type.
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)
def update_pipeline_and_auto_class_table(table):
    """Update the mapping of model class to (pipeline tag, auto class) from the auto modules."""
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, F"""TF_{model_mapping}""", F"""FLAX_{model_mapping}"""]
        auto_classes = [auto_class, F"""TF_{auto_class}""", F"""Flax_{auto_class}"""]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))
            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})
    return table
def update_metadata(token, commit_sha):
    """Regenerate the frameworks and pipeline-tags tables and push them to the metadata dataset."""
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                F"""Update with commit {commit_sha}\n\nSee: """
                F"""https://github.com/huggingface/transformers/commit/{commit_sha}"""
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )
def check_pipeline_tags():
    """Check that every supported pipeline task appears in PIPELINE_TAGS_AND_AUTO_MODELS."""
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            F"""`utils/update_metadata.py`: {msg}. Please add them!"""
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--token', type=str, help='The token to use to push to the transformers-metadata dataset.')
    parser.add_argument('--commit_sha', type=str, help='The sha of the commit going with this update.')
    parser.add_argument('--check-only', action='store_true', help='Activate to just check all pipelines are present.')
    args = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
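# Typical invocations, run from the root of the repo (the token and sha values
# are placeholders):
#
#   python utils/update_metadata.py --check-only
#   python utils/update_metadata.py --token hf_xxx --commit_sha abc1234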
| 324
| 1
|
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

# Segment ids (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
__lowercase = VOCAB_FILES_NAMES
__lowercase = PRETRAINED_VOCAB_FILES_MAP
__lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase = """left"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_=False , lowerCAmelCase_="<s>" , lowerCAmelCase_="</s>" , lowerCAmelCase_="<unk>" , lowerCAmelCase_="<sep>" , lowerCAmelCase_="<pad>" , lowerCAmelCase_="<cls>" , lowerCAmelCase_="<mask>" , lowerCAmelCase_=["<eop>", "<eod>"] , lowerCAmelCase_ = None , **lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = AddedToken(lowerCAmelCase_ , lstrip=lowerCAmelCase_ , rstrip=lowerCAmelCase_ ) if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) else mask_token
_snake_case = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=lowerCAmelCase_ , remove_space=lowerCAmelCase_ , keep_accents=lowerCAmelCase_ , bos_token=lowerCAmelCase_ , eos_token=lowerCAmelCase_ , unk_token=lowerCAmelCase_ , sep_token=lowerCAmelCase_ , pad_token=lowerCAmelCase_ , cls_token=lowerCAmelCase_ , mask_token=lowerCAmelCase_ , additional_special_tokens=lowerCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **lowerCAmelCase_ , )
_snake_case = 3
_snake_case = do_lower_case
_snake_case = remove_space
_snake_case = keep_accents
_snake_case = vocab_file
_snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(lowerCAmelCase_ )
@property
def lowerCamelCase ( self ):
"""simple docstring"""
return len(self.sp_model )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = {self.convert_ids_to_tokens(lowerCAmelCase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
"""simple docstring"""
_snake_case = self.__dict__.copy()
_snake_case = None
return state
def __setstate__( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
_snake_case = {}
_snake_case = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
if self.remove_space:
_snake_case = ' '.join(inputs.strip().split() )
else:
_snake_case = inputs
_snake_case = outputs.replace('``' , '"' ).replace('\'\'' , '"' )
if not self.keep_accents:
_snake_case = unicodedata.normalize('NFKD' , lowerCAmelCase_ )
_snake_case = ''.join([c for c in outputs if not unicodedata.combining(lowerCAmelCase_ )] )
if self.do_lower_case:
_snake_case = outputs.lower()
return outputs
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = self.preprocess_text(lowerCAmelCase_ )
_snake_case = self.sp_model.encode(lowerCAmelCase_ , out_type=str )
_snake_case = []
for piece in pieces:
if len(lowerCAmelCase_ ) > 1 and piece[-1] == str(',' ) and piece[-2].isdigit():
_snake_case = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE , '' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_snake_case = cur_pieces[1:]
else:
_snake_case = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(lowerCAmelCase_ )
else:
new_pieces.append(lowerCAmelCase_ )
return new_pieces
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
return self.sp_model.PieceToId(lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
return self.sp_model.IdToPiece(lowerCAmelCase_ )
def lowerCamelCase ( self , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = ''.join(lowerCAmelCase_ ).replace(SPIECE_UNDERLINE , ' ' ).strip()
return out_string
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = True , **lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = kwargs.pop('use_source_tokenizer' , False )
_snake_case = self.convert_ids_to_tokens(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
# To avoid mixing byte-level and unicode for byte-level BPE,
# we need to build the string separately for added tokens and byte-level tokens.
# cf. https://github.com/huggingface/transformers/issues/1133
_snake_case = []
_snake_case = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase_ ) )
_snake_case = []
sub_texts.append(lowerCAmelCase_ )
else:
current_sub_text.append(lowerCAmelCase_ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(lowerCAmelCase_ ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
_snake_case = ''.join(lowerCAmelCase_ )
_snake_case = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
_snake_case = self.clean_up_tokenization(lowerCAmelCase_ )
return clean_text
else:
return text
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ):
"""simple docstring"""
_snake_case = [self.sep_token_id]
_snake_case = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase_ , token_ids_a=lowerCAmelCase_ , already_has_special_tokens=lowerCAmelCase_ )
if token_ids_a is not None:
return ([0] * len(lowerCAmelCase_ )) + [1] + ([0] * len(lowerCAmelCase_ )) + [1, 1]
return ([0] * len(lowerCAmelCase_ )) + [1, 1]
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ):
"""simple docstring"""
_snake_case = [self.sep_token_id]
_snake_case = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_snake_case = os.path.join(
lowerCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowerCAmelCase_ )
elif not os.path.isfile(self.vocab_file ):
with open(lowerCAmelCase_ , 'wb' ) as fi:
_snake_case = self.sp_model.serialized_model_proto()
fi.write(lowerCAmelCase_ )
return (out_vocab_file,)
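# A hypothetical sketch of the two sequence builders above (upstream method
# names assumed from transformers' XLNetTokenizer): unlike BERT, XLNet appends
# its special tokens at the END of the sequence.
#
#     ids = tokenizer.build_inputs_with_special_tokens([10, 11], [20, 21])
#     # -> [10, 11, <sep>, 20, 21, <sep>, <cls>]
#     tokenizer.create_token_type_ids_from_sequences([10, 11], [20, 21])
#     # -> [0, 0, 0, 1, 1, 1, 2]   (segment id 2 marks the trailing <cls>)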
| 42
|
'''simple docstring'''
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
lowercase : Optional[Any] = False
class __UpperCAmelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained('shi-labs/versatile-diffusion' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
_snake_case = 'A painting of a squirrel eating a burger '
_snake_case = torch.manual_seed(0 )
_snake_case = pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowerCAmelCase_ )
_snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(lowerCAmelCase_ )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
_snake_case = generator.manual_seed(0 )
_snake_case = pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = VersatileDiffusionTextToImagePipeline.from_pretrained(
'shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(torch_device )
pipe.set_progress_bar_config(disable=None )
_snake_case = 'A painting of a squirrel eating a burger '
_snake_case = torch.manual_seed(0 )
_snake_case = pipe(
prompt=lowerCAmelCase_ , generator=lowerCAmelCase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
_snake_case = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_snake_case = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 42
| 1
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class _lowerCamelCase ( unittest.TestCase ):
UpperCAmelCase_ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCAmelCase_ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
UpperCAmelCase_ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
UpperCAmelCase_ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def snake_case_ (self ) -> List[Any]:
UpperCamelCase = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" )
UpperCamelCase = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__a ) , [{"label": "LABEL_0", "score": 0.504}] )
UpperCamelCase = text_classifier("This is great !" , top_k=2 )
self.assertEqual(
nested_simplify(__a ) , [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] )
UpperCamelCase = text_classifier(["This is great !", "This is bad"] , top_k=2 )
self.assertEqual(
nested_simplify(__a ) , [
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
] , )
UpperCamelCase = text_classifier("This is great !" , top_k=1 )
self.assertEqual(nested_simplify(__a ) , [{"label": "LABEL_0", "score": 0.504}] )
# Legacy behavior
UpperCamelCase = text_classifier("This is great !" , return_all_scores=False )
self.assertEqual(nested_simplify(__a ) , [{"label": "LABEL_0", "score": 0.504}] )
UpperCamelCase = text_classifier("This is great !" , return_all_scores=True )
self.assertEqual(
nested_simplify(__a ) , [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] )
UpperCamelCase = text_classifier(["This is great !", "Something else"] , return_all_scores=True )
self.assertEqual(
nested_simplify(__a ) , [
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
] , )
UpperCamelCase = text_classifier(["This is great !", "Something else"] , return_all_scores=False )
self.assertEqual(
nested_simplify(__a ) , [
{"label": "LABEL_0", "score": 0.504},
{"label": "LABEL_0", "score": 0.504},
] , )
@require_torch
def snake_case_ (self ) -> Optional[int]:
import torch
UpperCamelCase = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , )
UpperCamelCase = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__a ) , [{"label": "LABEL_0", "score": 0.504}] )
@require_tf
def snake_case_ (self ) -> Tuple:
UpperCamelCase = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" )
UpperCamelCase = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__a ) , [{"label": "LABEL_0", "score": 0.504}] )
@slow
@require_torch
def snake_case_ (self ) -> Union[str, Any]:
UpperCamelCase = pipeline("text-classification" )
UpperCamelCase = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__a ) , [{"label": "POSITIVE", "score": 1.0}] )
UpperCamelCase = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(__a ) , [{"label": "NEGATIVE", "score": 1.0}] )
UpperCamelCase = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(__a ) , [{"label": "POSITIVE", "score": 0.988}] )
@slow
@require_tf
def snake_case_ (self ) -> Dict:
UpperCamelCase = pipeline("text-classification" , framework="tf" )
UpperCamelCase = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__a ) , [{"label": "POSITIVE", "score": 1.0}] )
UpperCamelCase = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(__a ) , [{"label": "NEGATIVE", "score": 1.0}] )
UpperCamelCase = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(__a ) , [{"label": "POSITIVE", "score": 0.988}] )
def snake_case_ (self , __a , __a , __a ) -> Any:
UpperCamelCase = TextClassificationPipeline(model=__a , tokenizer=__a )
return text_classifier, ["HuggingFace is in", "This is another test"]
def snake_case_ (self , __a , __a ) -> Tuple:
UpperCamelCase = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
UpperCamelCase = 'HuggingFace is in'
UpperCamelCase = text_classifier(__a )
self.assertEqual(nested_simplify(__a ) , [{"label": ANY(__a ), "score": ANY(__a )}] )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
UpperCamelCase = ['HuggingFace is in ', 'Paris is in France']
UpperCamelCase = text_classifier(__a )
self.assertEqual(
nested_simplify(__a ) , [{"label": ANY(__a ), "score": ANY(__a )}, {"label": ANY(__a ), "score": ANY(__a )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
UpperCamelCase = text_classifier(__a , top_k=__a )
UpperCamelCase = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(__a ) , [[{"label": ANY(__a ), "score": ANY(__a )}] * N, [{"label": ANY(__a ), "score": ANY(__a )}] * N] , )
UpperCamelCase = {'text': 'HuggingFace is in ', 'text_pair': 'Paris is in France'}
UpperCamelCase = text_classifier(__a )
self.assertEqual(
nested_simplify(__a ) , {"label": ANY(__a ), "score": ANY(__a )} , )
self.assertTrue(outputs["label"] in model.config.idalabel.values() )
# This might be used as a text pair, but the tokenizer + pipeline interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong results.
UpperCamelCase = [['HuggingFace is in ', 'Paris is in France']]
with self.assertRaises(__a ):
text_classifier(__a )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
UpperCamelCase = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] )
self.assertEqual(
nested_simplify(__a ) , [{"label": ANY(__a ), "score": ANY(__a )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
| 350
|
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar('''T''')
def get_parent_position(position: int) -> int:
    """Heap index of the parent of the node at ``position``."""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """Heap index of the left child of the node at ``position``."""
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """Heap index of the right child of the node at ``position``."""
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Binary min-heap keyed by weight, with a position map for fast key updates."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        # Add an element with given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        # Remove and return the element with lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given key
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        # Place a node at the proper position (upward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        # Place a node at the proper position (downward movement) [to be used
        # internally only]
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
            else:
                return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap the nodes at the given positions
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Adjacency-map representation of an undirected, weighted graph."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """Run Prim's algorithm and return the (distance-to-tree, parent) maps."""
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
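if __name__ == "__main__":
    # A minimal demo (hypothetical graph): a weighted triangle whose most
    # expensive edge should be left out of the minimum spanning tree.
    g: GraphUndirectedWeighted[str] = GraphUndirectedWeighted()
    g.add_edge("a", "b", 3)
    g.add_edge("b", "c", 10)
    g.add_edge("c", "a", 5)
    dist, parent = prims_algo(g)
    # The MST keeps a-b (3) and a-c (5) and drops b-c (10).
    print(dist)    # {'a': 0, 'b': 3, 'c': 5}
    print(parent)  # {'a': None, 'b': 'a', 'c': 'a'}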
| 244
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default='Translation', init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the Translation feature into a dictionary of Value features."""
        from .features import Value

        return {k: Value('''string''') for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default='TranslationVariableLanguages', init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({'''language''': pa.list_(pa.string()), '''translation''': pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'''Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)}).'''
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))

        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Sequence, Value

        return {
            "language": Sequence(Value('''string''')),
            "translation": Sequence(Value('''string''')),
        }
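# A quick sketch of how the variable-language feature above encodes an example
# (hypothetical data): per-language lists are split into tuples and the result
# is sorted by (language, translation):
#
#     feat = TranslationVariableLanguages(languages=["de", "en", "fr"])
#     feat.encode_example(
#         {"en": "the cat", "fr": ["la chatte", "le chat"], "de": "die katze"}
#     )
#     # -> {"language": ("de", "en", "fr", "fr"),
#     #     "translation": ("die katze", "the cat", "la chatte", "le chat")}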
| 75
|
"""simple docstring"""
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
SCREAMING_SNAKE_CASE = [
"cross_validation.py",
"gradient_accumulation.py",
"local_sgd.py",
"multi_process_metrics.py",
"memory.py",
"automatic_gradient_accumulation.py",
"fsdp_with_peak_mem_tracking.py",
"deepspeed_with_config_support.py",
"megatron_lm_gpt_pretraining.py",
]
class UpperCAmelCase_ ( unittest.TestCase ):
def __magic_name__ ( self : List[Any] , snake_case_ : str , snake_case_ : bool , snake_case_ : str = None , snake_case_ : list = None ) -> Tuple:
'''simple docstring'''
A__ = None
A__ = os.path.abspath(os.path.join("examples" , "by_feature" ) )
A__ = os.path.abspath("examples" )
for item in os.listdir(snake_case_ ):
if item not in EXCLUDE_EXAMPLES:
A__ = os.path.join(snake_case_ , snake_case_ )
if os.path.isfile(snake_case_ ) and ".py" in item_path:
with self.subTest(
tested_script=snake_case_ , feature_script=snake_case_ , tested_section="main()" if parser_only else "training_function()" , ):
A__ = compare_against_test(
os.path.join(snake_case_ , snake_case_ ) , snake_case_ , snake_case_ , snake_case_ )
A__ = "\n".join(snake_case_ )
if special_strings is not None:
for string in special_strings:
A__ = diff.replace(snake_case_ , "" )
self.assertEqual(snake_case_ , "" )
def __magic_name__ ( self : List[str] ) -> str:
'''simple docstring'''
self.one_complete_example("complete_nlp_example.py" , snake_case_ )
self.one_complete_example("complete_nlp_example.py" , snake_case_ )
def __magic_name__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
A__ = os.path.abspath(os.path.join("examples" , "cv_example.py" ) )
A__ = [
" " * 16 + "{\n\n",
" " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n",
" " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n",
" " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n",
" " * 20 + "\"epoch\": epoch,\n\n",
" " * 16 + "},\n\n",
" " * 16 + "step=epoch,\n",
" " * 12,
" " * 8 + "for step, batch in enumerate(active_dataloader):\n",
]
self.one_complete_example("complete_cv_example.py" , snake_case_ , snake_case_ , snake_case_ )
self.one_complete_example("complete_cv_example.py" , snake_case_ , snake_case_ , snake_case_ )
@mock.patch.dict(os.environ, {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class UpperCAmelCase_ ( A_ ):
lowercase__ = False
@classmethod
def __magic_name__ ( cls : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
super().setUpClass()
A__ = tempfile.mkdtemp()
A__ = os.path.join(cls._tmpdir , "default_config.yml" )
write_basic_config(save_location=cls.configPath )
A__ = ["accelerate", "launch", "--config_file", cls.configPath]
@classmethod
def __magic_name__ ( cls : Dict ) -> str:
'''simple docstring'''
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def __magic_name__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
A__ = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) )
def __magic_name__ ( self : Any ) -> Any:
'''simple docstring'''
A__ = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
A__ = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "step_2" ) ) )
def __magic_name__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
A__ = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
""".split()
A__ = run_command(self._launch_args + testargs , return_stdout=snake_case_ )
self.assertNotIn("epoch 0:" , snake_case_ )
self.assertIn("epoch 1:" , snake_case_ )
def __magic_name__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
A__ = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
""".split()
A__ = run_command(self._launch_args + testargs , return_stdout=snake_case_ )
if torch.cuda.is_available():
A__ = torch.cuda.device_count()
else:
A__ = 1
if num_processes > 1:
self.assertNotIn("epoch 0:" , snake_case_ )
self.assertIn("epoch 1:" , snake_case_ )
else:
self.assertIn("epoch 0:" , snake_case_ )
self.assertIn("epoch 1:" , snake_case_ )
@slow
def __magic_name__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
A__ = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split()
with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ):
A__ = run_command(self._launch_args + testargs , return_stdout=snake_case_ )
A__ = re.findall("({.+})" , snake_case_ )
A__ = [r for r in results if "accuracy" in r][-1]
A__ = ast.literal_eval(snake_case_ )
self.assertGreaterEqual(results["accuracy"] , 0.75 )
def __magic_name__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
A__ = ["examples/by_feature/multi_process_metrics.py"]
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} )
def __magic_name__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
A__ = F"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(snake_case_ , "tracking" ) ) )
def __magic_name__ ( self : List[Any] ) -> int:
'''simple docstring'''
A__ = ["examples/by_feature/gradient_accumulation.py"]
run_command(self._launch_args + testargs )
def __magic_name__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
A__ = ["examples/by_feature/local_sgd.py"]
run_command(self._launch_args + testargs )
| 247
| 0
|
"""simple docstring"""
import operator as op
def solve(post_fix):
    """Evaluate a space-separated postfix expression, printing every step."""
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        "^": op.pow,
        "*": op.mul,
        "/": div,
        "+": op.add,
        "-": op.sub,
    }  # operators & their respective operations

    # print table header
    print("Symbol".center(8), "Action".center(12), "Stack", sep=" | ")
    print("-" * (30 + len(post_fix)))
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # push x onto the stack
            # output in tabular format
            print(x.rjust(8), ("push(" + x + ")").ljust(12), ",".join(stack), sep=" | ")
        else:
            b = stack.pop()  # pop the second operand
            # output in tabular format
            print("".rjust(8), ("pop(" + b + ")").ljust(12), ",".join(stack), sep=" | ")
            a = stack.pop()  # pop the first operand
            # output in tabular format
            print("".rjust(8), ("pop(" + a + ")").ljust(12), ",".join(stack), sep=" | ")
            # evaluate the two popped values & push the result onto the stack
            stack.append(str(opr[x](int(a), int(b))))
            # output in tabular format
            print(x.rjust(8), ("push(" + a + x + b + ")").ljust(12), ",".join(stack), sep=" | ")
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
    print('\n\tResult = ', solve(Postfix))
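# A worked example (hypothetical expression): "5 6 9 * +" evaluates as
# 5 + (6 * 9) = 59, with each push/pop printed as a table row:
#
#     solve("5 6 9 * +".split(" "))  # -> 59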
| 352
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": F"""{pt_version} ({pt_cuda_available})""",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": F"""{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB""",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([F"""- {prop}: {val}""" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([F"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else F"""\t{accelerate_config}"""
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
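# Running the command (e.g. ``accelerate env``) prints a block to paste into
# GitHub issues; the values below are purely illustrative and machine-specific:
#
#   - `Accelerate` version: 0.21.0
#   - Platform: Linux-5.15.0-x86_64-with-glibc2.31
#   - Python version: 3.10.12
#   - Numpy version: 1.24.3
#   - PyTorch version (GPU?): 2.0.1 (True)
#   - PyTorch XPU available: False
#   - PyTorch NPU available: False
#   - System RAM: 62.79 GB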
| 57
| 0
|
'''simple docstring'''
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # reject negative values of the initial intensity
    if initial_intensity < 0:
        raise ValueError("""The value of intensity cannot be negative""")
    # reject angles outside the allowed range
    if angle < 0 or angle > 360:
        raise ValueError("""In Malus Law, the angle is in the range 0-360 degrees""")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
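# A worked example (hypothetical numbers): with an initial intensity of 100.0
# and a polarizer angle of 60 degrees, Malus's law gives
# I = I0 * cos^2(60 deg) = 100.0 * 0.25 = 25.0:
#
#     malus_law(100.0, 60.0)  # -> ~25.0 (up to floating-point rounding)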
| 164
|
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class a ( lowerCAmelCase_ ):
_snake_case : Any = 'efficientnet'
def __init__( self : Any , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 600 , __lowerCAmelCase : float = 2.0 , __lowerCAmelCase : float = 3.1 , __lowerCAmelCase : int = 8 , __lowerCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , __lowerCAmelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , __lowerCAmelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , __lowerCAmelCase : List[int] = [] , __lowerCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , __lowerCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , __lowerCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , __lowerCAmelCase : float = 0.25 , __lowerCAmelCase : str = "swish" , __lowerCAmelCase : int = 2560 , __lowerCAmelCase : str = "mean" , __lowerCAmelCase : float = 0.02 , __lowerCAmelCase : float = 0.001 , __lowerCAmelCase : float = 0.99 , __lowerCAmelCase : float = 0.5 , __lowerCAmelCase : float = 0.2 , **__lowerCAmelCase : List[Any] , ):
super().__init__(**__lowerCAmelCase )
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = width_coefficient
_UpperCAmelCase = depth_coefficient
_UpperCAmelCase = depth_divisor
_UpperCAmelCase = kernel_sizes
_UpperCAmelCase = in_channels
_UpperCAmelCase = out_channels
_UpperCAmelCase = depthwise_padding
_UpperCAmelCase = strides
_UpperCAmelCase = num_block_repeats
_UpperCAmelCase = expand_ratios
_UpperCAmelCase = squeeze_expansion_ratio
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dim
_UpperCAmelCase = pooling_type
_UpperCAmelCase = initializer_range
_UpperCAmelCase = batch_norm_eps
_UpperCAmelCase = batch_norm_momentum
_UpperCAmelCase = dropout_rate
_UpperCAmelCase = drop_connect_rate
_UpperCAmelCase = sum(__lowerCAmelCase ) * 4
class a ( lowerCAmelCase_ ):
_snake_case : Dict = version.parse('1.11' )
@property
def lowerCAmelCase_ ( self : Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase_ ( self : int ):
return 1e-5
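# A minimal sketch (hypothetical values), assuming the classes above mirror
# transformers' EfficientNetConfig: width_coefficient and depth_coefficient
# are the compound-scaling knobs, so a slightly wider/deeper variant at a
# higher resolution is just a matter of overriding the defaults:
#
#     config = EfficientNetConfig(
#         width_coefficient=1.1, depth_coefficient=1.2, image_size=260
#     )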
| 289
| 0
|
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger()
@dataclass
class snake_case :
__UpperCamelCase = 42
__UpperCamelCase = field(default_factory=_UpperCamelCase)
__UpperCamelCase = field(default_factory=_UpperCamelCase)
def a_ ( self : Any , a__ : List[Any] , a__ : Tensor , a__ : Tensor ) -> List[Any]:
'''simple docstring'''
_A = len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase__ , nn.Convad ) or isinstance(lowerCAmelCase__ , nn.BatchNormad )
if has_not_submodules:
self.traced.append(lowerCAmelCase__ )
def __call__( self : Optional[int] , a__ : Tensor ) -> List[Any]:
'''simple docstring'''
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(lowerCAmelCase__ )
[x.remove() for x in self.handles]
return self
@property
def a_ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return list(filter(lambda a__ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class snake_case :
__UpperCamelCase = 42
__UpperCamelCase = 42
__UpperCamelCase = 1
__UpperCamelCase = field(default_factory=_UpperCamelCase)
__UpperCamelCase = field(default_factory=_UpperCamelCase)
__UpperCamelCase = True
def __call__( self : List[str] , a__ : Tensor ) -> str:
'''simple docstring'''
_A = Tracker(self.dest )(lowerCAmelCase__ ).parametrized
_A = Tracker(self.src )(lowerCAmelCase__ ).parametrized
_A = list(filter(lambda a__ : type(lowerCAmelCase__ ) not in self.src_skip , lowerCAmelCase__ ) )
_A = list(filter(lambda a__ : type(lowerCAmelCase__ ) not in self.dest_skip , lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ) and self.raise_if_mismatch:
raise Exception(
F"""Numbers of operations are different. Source module has {len(lowerCAmelCase__ )} operations while"""
F""" destination module has {len(lowerCAmelCase__ )}.""" )
for dest_m, src_m in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(F"""Transferred from={src_m} to={dest_m}""" )
class snake_case ( nn.Module):
def __init__( self : List[Any] , a__ : nn.Module ) -> Dict:
'''simple docstring'''
super().__init__()
_A = []
# - get the stem
feature_blocks.append(("conv1", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("block" ), F"""Unexpected layer name {k}"""
_A = len(lowerCAmelCase__ ) + 1
feature_blocks.append((F"""res{block_index}""", v) )
_A = nn.ModuleDict(lowerCAmelCase__ )
def a_ ( self : List[str] , a__ : Tensor ) -> List[str]:
'''simple docstring'''
return get_trunk_forward_outputs(
lowerCAmelCase__ , out_feat_keys=lowerCAmelCase__ , feature_blocks=self._feature_blocks , )
class snake_case ( _UpperCamelCase):
def a_ ( self : str , a__ : str ) -> str:
'''simple docstring'''
_A = x.split("-" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self : Dict , a__ : str ) -> Callable[[], Tuple[nn.Module, Dict]]:
'''simple docstring'''
if x not in self:
_A = self.convert_name_to_timm(lowerCAmelCase__ )
_A = partial(lambda: (timm.create_model(lowerCAmelCase__ , pretrained=lowerCAmelCase__ ).eval(), None) )
else:
_A = super().__getitem__(lowerCAmelCase__ )
return val
class snake_case ( _UpperCamelCase):
def __getitem__( self : Optional[int] , a__ : str ) -> Callable[[], nn.Module]:
'''simple docstring'''
if "seer" in x and "in1k" not in x:
_A = RegNetModel
else:
_A = RegNetForImageClassification
return val
def a__ ( __lowercase , __lowercase , __lowercase ) -> Optional[Any]:
for from_key, to_key in keys:
_A = from_state_dict[from_key].clone()
print(f"""Copied key={from_key} to={to_key}""" )
return to_state_dict
def a__ ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = True , ) -> List[Any]:
print(f"""Converting {name}...""" )
with torch.no_grad():
_A = from_model_func()
_A = our_model_func(a_ ).eval()
_A = ModuleTransfer(src=a_ , dest=a_ , raise_if_mismatch=a_ )
_A = torch.randn((1, 3, 224, 224) )
module_transfer(a_ )
if from_state_dict is not None:
_A = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
_A = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
_A = manually_copy_vissl_head(a_ , our_model.state_dict() , a_ )
our_model.load_state_dict(a_ )
_A = our_model(a_ , output_hidden_states=a_ )
_A = (
our_outputs.logits if isinstance(a_ , a_ ) else our_outputs.last_hidden_state
)
_A = from_model(a_ )
_A = from_output[-1] if type(a_ ) is list else from_output
# since we don't use any config files and the vissl seer model doesn't actually have a head, we just check the last hidden state
if "seer" in name and "in1k" in name:
_A = our_outputs.hidden_states[-1]
assert torch.allclose(a_ , a_ ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=a_ , )
_A = 224 if "seer" not in name else 384
# we can use the convnext one
_A = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=a_ )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=a_ , )
print(f"""Pushed {name}""" )
def a__ ( __lowercase , __lowercase = None , __lowercase = True ) -> Dict:
_A = "imagenet-1k-id2label.json"
_A = 1000
_A = (1, num_labels)
_A = "huggingface/label-files"
_A = num_labels
_A = json.load(open(cached_download(hf_hub_url(a_ , a_ , repo_type="dataset" ) ) , "r" ) )
_A = {int(a_ ): v for k, v in idalabel.items()}
_A = idalabel
_A = {v: k for k, v in idalabel.items()}
_A = partial(a_ , num_labels=a_ , idalabel=a_ , labelaid=a_ )
_A = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic
    def load_using_classy_vision(checkpoint_url: str, model_func) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]
    # pretrained
    # RegNetY{32,64,128}gf below are the ClassyVision reference models; the digit-bearing
    # class names were mangled in this dump and are restored from the upstream conversion script.
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )
    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(
            RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))
        ),
    )
    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
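# Example invocation of this conversion script (the output path is illustrative):
#   python <this_script>.py --model_name regnet-y-040 --pytorch_dump_folder_path ./converted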
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
a_ = parser.parse_args()
a_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 256047
RO_CODE = 256145
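# NLLB appends its 200+ language-code tokens after the base SentencePiece vocabulary;
# in the facebook/nllb-200 checkpoints "eng_Latn" maps to id 256047 and "ron_Latn" to
# id 256145, which is what the integration tests below rely on.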
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp(self) -> None:
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self) -> None:
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )
    def test_save_pretrained(self) -> None:
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
    @require_torch
    def test_prepare_seq2seq_batch(self) -> None:
        if not self.test_seq2seq:
            return
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text,
                        tgt_texts=tgt_text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors="pt",
                        src_lang="eng_Latn",
                        tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)
                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
                )
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)
                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)
    @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
    def test_save_slow_from_fast_and_reload_fast(self) -> None:
        pass
    def test_special_tokens_initialization(self) -> None:
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode("Hey this is a <special> token")
                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]
                self.assertTrue(special_token_id in r_output)
                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )
                    p_output = tokenizer_p.encode("Hey this is a <special> token")
                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")
                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that"
        ' "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the'
        " violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]
    @classmethod
    def setUpClass(cls) -> None:
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self) -> None:
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256057)
    def test_enro_tokenizer_batch_encode_plus(self) -> None:
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self) -> None:
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self) -> None:
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self) -> None:
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3])
    def test_special_tokens_unaffacted_by_save_load(self) -> None:
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_enro_tokenizer_prepare_batch(self) -> None:
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"]
        )
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # first decoder token is the target language code
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_length(self) -> None:
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels,
            self.tokenizer.pad_token_id,
            decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],
        )
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self) -> None:
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # eng_Latn, A, test, EOS
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # fra_Latn
                "forced_bos_token_id": 256057,
            },
        )
    @require_torch
    def test_legacy_behaviour(self) -> None:
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047]
        )
        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2]
        )
'''simple docstring'''
def binary_and(a: int, b: int) -> str:
    """
    Return the bitwise AND of two non-negative integers as a binary string.
    >>> binary_and(3, 5)
    '0b001'
    >>> binary_and(25, 32)
    '0b000000'
    """
    if a < 0 or b < 0:
        raise ValueError("the value of both inputs must be positive")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == "1" and char_b == "1"))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len))
    )
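# Note: zfill pads the shorter operand with leading zeros so both binary strings can
# be compared position by position; the result keeps that common width.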
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
    import torch
lowercase__ : str = logging.get_logger(__name__)
class ImageProcessor(BaseImageProcessor):
    # The model-specific class name was lost in this dump; "ImageProcessor" is a
    # placeholder for a standard resize / center-crop / rescale / normalize processor.
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None) -> List:
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
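# Hypothetical usage sketch (variable names are illustrative, not from this file):
#   processor = ImageProcessor()
#   batch = processor(images=pil_image, return_tensors="pt")   # -> {"pixel_values": tensor}
#   maps = processor.post_process_semantic_segmentation(model_outputs, target_sizes=[(height, width)])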
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
'''simple docstring'''
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
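# Examples: bin_to_octal("1111") -> "17"; bin_to_octal("101010101010011") -> "52523".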
if __name__ == "__main__":
from doctest import testmod
testmod()
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
    import torch
lowerCamelCase_ = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    """Wraps the arguments for bitsandbytes 8-bit / 4-bit model quantization."""
    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")
        self.post_init()
    def post_init(self):
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")
        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )
    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit
    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None
    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config
    def to_json_file(self, json_file_path):
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output
    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"
    def to_json_string(self, use_diff: bool = True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
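# Minimal usage sketch (illustrative; requires `bitsandbytes` installed and a model id to load):
#   quant_config = BitsAndBytesConfig(
#       load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype="bfloat16"
#   )
#   model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quant_config)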
def wave(txt: str) -> list:
    """Return the 'wave' of a string: each alphabetic character capitalized in turn."""
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]
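# Example: wave("cat") -> ["Cat", "cAt", "caT"]; non-alphabetic positions are skipped.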
if __name__ == "__main__":
__import__('''doctest''').testmod()
"""simple docstring"""
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
_CITATION = '\\n@inproceedings{lin-2004-rouge,\n    title = "{ROUGE}: A Package for Automatic Evaluation of Summaries",\n    author = "Lin, Chin-Yew",\n    booktitle = "Text Summarization Branches Out",\n    month = jul,\n    year = "2004",\n    address = "Barcelona, Spain",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W04-1013",\n    pages = "74--81",\n}\n'
_DESCRIPTION = '\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n'
_KWARGS_DESCRIPTION = '\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n    predictions: list of predictions to score. Each prediction\n        should be a string with tokens separated by spaces.\n    references: list of reference for each prediction. Each\n        reference should be a string with tokens separated by spaces.\n    rouge_types: A list of rouge types to calculate.\n        Valid names:\n        `"rouge{n}"` (e.g. `"rouge1"`, `"rouge2"`) where: {n} is the n-gram based scoring,\n        `"rougeL"`: Longest common subsequence based scoring.\n        `"rougeLSum"`: rougeLsum splits text using `"\n"`.\n        See details in https://github.com/huggingface/datasets/issues/617\n    use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n    use_aggregator: Return aggregates if this is set to True\nReturns:\n    rouge1: rouge_1 (precision, recall, f1),\n    rouge2: rouge_2 (precision, recall, f1),\n    rougeL: rouge_l (precision, recall, f1),\n    rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n    >>> rouge = datasets.load_metric(\'rouge\')\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> results = rouge.compute(predictions=predictions, references=references)\n    >>> print(list(results.keys()))\n    [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n    >>> print(results["rouge1"])\n    AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n    >>> print(results["rouge1"].mid.fmeasure)\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Rouge(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/google-research/tree/master/rouge"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/ROUGE_(metric)",
                "https://github.com/google-research/google-research/tree/master/rouge",
            ],
        )
    def _compute(self, predictions, references, rouge_types=None, use_aggregator=True, use_stemmer=False):
        if rouge_types is None:
            rouge_types = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
        scorer = rouge_scorer.RougeScorer(rouge_types=rouge_types, use_stemmer=use_stemmer)
        if use_aggregator:
            aggregator = scoring.BootstrapAggregator()
        else:
            scores = []
        for ref, pred in zip(references, predictions):
            score = scorer.score(ref, pred)
            if use_aggregator:
                aggregator.add_scores(score)
            else:
                scores.append(score)
        if use_aggregator:
            result = aggregator.aggregate()
        else:
            result = {}
            for key in scores[0]:
                result[key] = [score[key] for score in scores]
        return result
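# The returned mapping has one entry per requested rouge type: an AggregateScore
# (low/mid/high Score tuples) when use_aggregator=True, otherwise a list of
# per-example Score tuples.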
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    """Build a row_size x row_size matrix counting up from 1."""
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]
def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))
def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))
def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix
def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix
def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)
if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
"""simple docstring"""
def odd_even_sort(input_list: list) -> list:
    """Sort a list in place using odd-even transposition (brick) sort."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]
    # inputing elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
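# Odd-even (brick) sort is a parallel-friendly variant of bubble sort: worst-case
# O(n^2) comparisons, O(1) extra space, and stable since only adjacent out-of-order
# pairs are swapped.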
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
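# jax_cosine_distance L2-normalizes each row and returns the full matrix of pairwise
# cosine similarities; `eps` guards against division by zero for all-zero embeddings.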
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)
        self.concept_embeds = self.param(
            "concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim)
        )
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )
        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))
    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)
        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)
        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0
        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01
        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)
        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule
    def __init__(
        self,
        config: CLIPConfig,
        input_shape=None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
    def init_weights(self, rng, input_shape, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)
        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}
        random_params = self.module.init(rngs, clip_input)["params"]
        return random_params
    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))
        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]) -> None:
    """Compute WER/CER metrics and optionally log targets and predictions."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])
    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")
    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])
    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)
    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"
        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")
            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Normalize transcripts the same way the training data was normalized."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))
    return text
def main(args: argparse.Namespace) -> None:
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)
    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(result, args)
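# Example invocation (model/dataset ids are illustrative):
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_7_0 --config en --split test --log_outputs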
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
_lowerCamelCase : str = parser.parse_args()
main(args)
'''simple docstring'''
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bs4_available():
    from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent
    def prepare_feat_extract_dict(self):
        return {}
def get_html_strings():
    html_string_1 = """<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR=\"FFFFFF\">
<HR>
<a href=\"http://google.com\">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style=\"color:#0000FF\">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>"""
UpperCAmelCase__ : Tuple = """
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
"""
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None
    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)
    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()
        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)
        # fmt: off
        expected_nodes = [["sample document", "Goog", "This is one header", "This is a another Header", "Travel from", "SFO to JFK", "on May 2, 2015 at 2:00 pm. For details go to confirm.com", "Traveler", "name", "is", "John Doe"]]
        expected_xpaths = [["/html/head/title", "/html/body/a", "/html/body/h1", "/html/body/h2", "/html/body/p", "/html/body/p/p/b[1]", "/html/body/p/p/b[2]/i", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/b", "/html/body/p/p/div/h3", "/html/body/p/p/div/h3/p"]]
        # fmt: on
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)
        # fmt: off
        expected_nodes = expected_nodes + [["My First Heading", "My first paragraph."]]
        expected_xpaths = expected_xpaths + [["/html/body/h1", "/html/body/p"]]
        # fmt: on
        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)
        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
from __future__ import annotations
import bisect
def a__ ( UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] = 0 , UpperCAmelCase : Optional[int] = -1 ) -> int:
if hi < 0:
UpperCAmelCase : Optional[Any] = len(UpperCAmelCase )
while lo < hi:
UpperCAmelCase : List[Any] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
UpperCAmelCase : Union[str, Any] = mid + 1
else:
UpperCAmelCase : Union[str, Any] = mid
return lo
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] = 0 , UpperCAmelCase : List[Any] = -1 ) -> int:
if hi < 0:
UpperCAmelCase : str = len(UpperCAmelCase )
while lo < hi:
UpperCAmelCase : List[str] = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
UpperCAmelCase : str = mid + 1
else:
UpperCAmelCase : Dict = mid
return lo
def a__ ( UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : str = 0 , UpperCAmelCase : Optional[int] = -1 ) -> None:
sorted_collection.insert(bisect_left(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
def a__ ( UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[int] = 0 , UpperCAmelCase : Optional[int] = -1 ) -> None:
sorted_collection.insert(bisect_right(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , UpperCAmelCase )
def a__ ( UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] ) -> int | None:
UpperCAmelCase : Any = 0
UpperCAmelCase : int = len(UpperCAmelCase ) - 1
while left <= right:
UpperCAmelCase : Optional[Any] = left + (right - left) // 2
UpperCAmelCase : Any = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
UpperCAmelCase : Union[str, Any] = midpoint - 1
else:
UpperCAmelCase : List[str] = midpoint + 1
return None
def a__ ( UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> int | None:
UpperCAmelCase : Tuple = bisect.bisect_left(UpperCAmelCase , UpperCAmelCase )
if index != len(UpperCAmelCase ) and sorted_collection[index] == item:
return index
return None
def a__ ( UpperCAmelCase : Any , UpperCAmelCase : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int ) -> int | None:
if right < left:
return None
UpperCAmelCase : Any = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , midpoint - 1 )
else:
return binary_search_by_recursion(UpperCAmelCase , UpperCAmelCase , midpoint + 1 , UpperCAmelCase )
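# Illustrative doctest-style checks (not part of the original module); values
# assume the de-obfuscated bisect_left / bisect_right / binary_search helpers:
#   >>> bisect_left([0, 5, 7, 10, 15], 5)
#   1
#   >>> bisect_right([0, 5, 7, 10, 15], 5)
#   2
#   >>> binary_search([0, 5, 7, 10, 15], 10)
#   3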
if __name__ == "__main__":
_lowerCamelCase : List[str] = input("Enter numbers separated by comma:\n").strip()
_lowerCamelCase : Dict = sorted(int(item) for item in user_input.split(","))
_lowerCamelCase : Optional[Any] = int(input("Enter a single number to be found in the list:\n"))
_lowerCamelCase : Optional[int] = binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""")
| 371
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_lowerCamelCase : Optional[Any] = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def a__ ( ) -> List[Any]:
UpperCAmelCase : Dict = _ask_options(
'''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
UpperCAmelCase : List[Any] = get_sagemaker_input()
else:
UpperCAmelCase : Optional[Any] = get_cluster_input()
return config
def a__ ( UpperCAmelCase : Union[str, Any]=None ) -> List[Any]:
if subparsers is not None:
UpperCAmelCase : Optional[Any] = subparsers.add_parser('''config''' , description=UpperCAmelCase )
else:
UpperCAmelCase : List[str] = argparse.ArgumentParser('''Accelerate config command''' , description=UpperCAmelCase )
parser.add_argument(
'''--config_file''' , default=UpperCAmelCase , help=(
'''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
'''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
'''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
'''with \'huggingface\'.'''
) , )
if subparsers is not None:
parser.set_defaults(func=UpperCAmelCase )
return parser
def a__ ( UpperCAmelCase : List[Any] ) -> Optional[int]:
UpperCAmelCase : str = get_user_input()
if args.config_file is not None:
UpperCAmelCase : Any = args.config_file
else:
if not os.path.isdir(UpperCAmelCase ):
os.makedirs(UpperCAmelCase )
UpperCAmelCase : List[str] = default_yaml_config_file
if config_file.endswith('''.json''' ):
config.to_json_file(UpperCAmelCase )
else:
config.to_yaml_file(UpperCAmelCase )
print(f'''accelerate configuration saved at {config_file}''' )
def a__ ( ) -> Dict:
UpperCAmelCase : str = config_command_parser()
UpperCAmelCase : str = parser.parse_args()
config_command(UpperCAmelCase )
if __name__ == "__main__":
main()
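# Typical invocation (illustrative): running `accelerate config` walks through
# the prompts above and writes the answers to default_config.yaml, while
# `accelerate config --config_file path/to/cfg.yaml` picks a custom location.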
| 99
| 0
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 17
|
"""simple docstring"""
from math import sqrt
def _A ( UpperCamelCase_ : int) -> int:
'''simple docstring'''
__lowercase = 0
for i in range(1, int(sqrt(UpperCamelCase_) + 1)):
if n % i == 0 and i != sqrt(UpperCamelCase_):
total += i + n // i
elif i == sqrt(UpperCamelCase_):
total += i
return total - n
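# Illustrative check (not in the original): 220 and 284 form the classic
# amicable pair, so sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220.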
def _A ( UpperCamelCase_ : int = 10000) -> int:
'''simple docstring'''
__lowercase = sum(
i
for i in range(1, UpperCamelCase_)
if sum_of_divisors(sum_of_divisors(UpperCamelCase_)) == i and sum_of_divisors(UpperCamelCase_) != i)
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 17
| 1
|
a_ : Any = 'Alexander Joslin'
import operator as op
from .stack import Stack
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
SCREAMING_SNAKE_CASE = Stack()
SCREAMING_SNAKE_CASE = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(_UpperCAmelCase))
elif i in operators:
# RULE 2
operator_stack.push(_UpperCAmelCase)
elif i == ")":
# RULE 4
SCREAMING_SNAKE_CASE = operator_stack.peek()
operator_stack.pop()
SCREAMING_SNAKE_CASE = operand_stack.peek()
operand_stack.pop()
SCREAMING_SNAKE_CASE = operand_stack.peek()
operand_stack.pop()
SCREAMING_SNAKE_CASE = operators[opr](_UpperCAmelCase , _UpperCAmelCase)
operand_stack.push(_UpperCAmelCase)
# RULE 5
return operand_stack.peek()
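# Worked trace (illustrative) for "(2 + 3)": '2' and '3' are pushed onto the
# operand stack and '+' onto the operator stack; the ')' triggers RULE 4, which
# pops '+', 3 and 2, applies '+' to them, and pushes 5 as the result.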
if __name__ == "__main__":
a_ : Tuple = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 327
|
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase , _UpperCAmelCase)
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = emb.weight.shape
SCREAMING_SNAKE_CASE = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = emb.weight.data
return lin_layer
def lowerCamelCase__ (_UpperCAmelCase):
SCREAMING_SNAKE_CASE = torch.load(_UpperCAmelCase , map_location='cpu')
SCREAMING_SNAKE_CASE = mam_aaa['args'] or mam_aaa['cfg']['model']
SCREAMING_SNAKE_CASE = mam_aaa['model']
remove_ignore_keys_(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = state_dict['encoder.embed_tokens.weight'].shape[0]
SCREAMING_SNAKE_CASE = MaMaaaConfig(
vocab_size=_UpperCAmelCase , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
SCREAMING_SNAKE_CASE = state_dict['decoder.embed_tokens.weight']
SCREAMING_SNAKE_CASE = MaMaaaForConditionalGeneration(_UpperCAmelCase)
model.model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase)
SCREAMING_SNAKE_CASE = make_linear_from_emb(model.model.shared)
return model
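# Illustrative CLI invocation (hypothetical script name and paths, matching the
# positional arguments parsed below):
#   python convert_m2m100_checkpoint.py /path/to/model.pt ./m2m100_dump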
if __name__ == "__main__":
a_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
a_ : List[str] = parser.parse_args()
a_ : Dict = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 327
| 1
|
'''simple docstring'''
from __future__ import annotations
import pandas as pd
def a__ ( lowercase : list[int], lowercase : list[int], lowercase : int ) -> list[int]:
"""simple docstring"""
_UpperCamelCase = [0] * no_of_processes
_UpperCamelCase = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(lowercase ):
_UpperCamelCase = burst_time[i]
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = 999999999
_UpperCamelCase = 0
_UpperCamelCase = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(lowercase ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
_UpperCamelCase = remaining_time[j]
_UpperCamelCase = j
_UpperCamelCase = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
_UpperCamelCase = remaining_time[short]
if minm == 0:
_UpperCamelCase = 999999999
if remaining_time[short] == 0:
complete += 1
_UpperCamelCase = False
# Find finish time of current process
_UpperCamelCase = increment_time + 1
# Calculate waiting time
_UpperCamelCase = finish_time - arrival_time[short]
_UpperCamelCase = finar - burst_time[short]
if waiting_time[short] < 0:
_UpperCamelCase = 0
# Increment time
increment_time += 1
return waiting_time
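# Illustrative check (not in the original): with arrival_time=[0, 0, 0] and
# burst_time=[1, 2, 3], SRTF degenerates to shortest-job-first and the
# expected waiting times are [0, 1, 3].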
def a__ ( lowercase : list[int], lowercase : int, lowercase : list[int] ) -> list[int]:
"""simple docstring"""
_UpperCamelCase = [0] * no_of_processes
for i in range(lowercase ):
_UpperCamelCase = burst_time[i] + waiting_time[i]
return turn_around_time
def a__ ( lowercase : list[int], lowercase : list[int], lowercase : int ) -> None:
"""simple docstring"""
_UpperCamelCase = 0
_UpperCamelCase = 0
for i in range(lowercase ):
_UpperCamelCase = total_waiting_time + waiting_time[i]
_UpperCamelCase = total_turn_around_time + turn_around_time[i]
print(F"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
print('''Average turn around time =''', total_turn_around_time / no_of_processes )
if __name__ == "__main__":
    print('Enter how many processes you want to analyze')
lowercase__ : str = int(input())
lowercase__ : Optional[int] = [0] * no_of_processes
lowercase__ : List[Any] = [0] * no_of_processes
lowercase__ : Dict = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
        print('Enter the arrival time and burst time for process ' + str(i + 1) + ':')
lowercase__ , lowercase__ : Dict = map(int, input().split())
lowercase__ : List[str] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowercase__ : int = burst_time
lowercase__ : List[str] = no_of_processes
lowercase__ : int = waiting_time
lowercase__ : Any = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
lowercase__ : Optional[Any] = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
'Process',
'BurstTime',
'ArrivalTime',
'WaitingTime',
'TurnAroundTime',
],
)
# Printing the dataFrame
pd.set_option('display.max_rows', fcfs.shape[0] + 1)
print(fcfs)
| 324
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
# General docstring
lowercase__ : Dict = 'ResNetConfig'
# Base docstring
lowercase__ : str = 'microsoft/resnet-50'
lowercase__ : Tuple = [1, 20_48, 7, 7]
# Image classification docstring
lowercase__ : Optional[Any] = 'microsoft/resnet-50'
lowercase__ : List[str] = 'tiger cat'
lowercase__ : List[Any] = [
'microsoft/resnet-50',
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 3 , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
_UpperCamelCase = nn.Convad(
lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=lowerCAmelCase__ , stride=lowerCAmelCase__ , padding=kernel_size // 2 , bias=lowerCAmelCase__ )
_UpperCamelCase = nn.BatchNormad(lowerCAmelCase__ )
_UpperCamelCase = ACTaFN[activation] if activation is not None else nn.Identity()
def snake_case__ ( self : Any , lowerCAmelCase__ : Tensor ) -> Tensor:
'''simple docstring'''
_UpperCamelCase = self.convolution(lowerCAmelCase__ )
_UpperCamelCase = self.normalization(lowerCAmelCase__ )
_UpperCamelCase = self.activation(lowerCAmelCase__ )
return hidden_state
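# Shape sketch (illustrative, not in the original): with in_channels=3,
# out_channels=64, kernel_size=7 and stride=2, a (B, 3, 224, 224) input maps
# to (B, 64, 112, 112), since the convolution pads by kernel_size // 2.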
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase__ : ResNetConfig ) -> Tuple:
'''simple docstring'''
super().__init__()
_UpperCamelCase = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
_UpperCamelCase = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
_UpperCamelCase = config.num_channels
def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : Tensor ) -> Tensor:
'''simple docstring'''
_UpperCamelCase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
_UpperCamelCase = self.embedder(lowerCAmelCase__ )
_UpperCamelCase = self.pooler(lowerCAmelCase__ )
return embedding
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 2 ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
_UpperCamelCase = nn.Convad(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , stride=lowerCAmelCase__ , bias=lowerCAmelCase__ )
_UpperCamelCase = nn.BatchNormad(lowerCAmelCase__ )
def snake_case__ ( self : Any , lowerCAmelCase__ : Tensor ) -> Tensor:
'''simple docstring'''
_UpperCamelCase = self.convolution(lowerCAmelCase__ )
_UpperCamelCase = self.normalization(lowerCAmelCase__ )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" ) -> str:
'''simple docstring'''
super().__init__()
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = (
ResNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity()
)
_UpperCamelCase = nn.Sequential(
ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , activation=lowerCAmelCase__ ) , )
_UpperCamelCase = ACTaFN[activation]
def snake_case__ ( self : Tuple , lowerCAmelCase__ : Tuple ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = hidden_state
_UpperCamelCase = self.layer(lowerCAmelCase__ )
_UpperCamelCase = self.shortcut(lowerCAmelCase__ )
hidden_state += residual
_UpperCamelCase = self.activation(lowerCAmelCase__ )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : str = "relu" , lowerCAmelCase__ : int = 4 ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
_UpperCamelCase = in_channels != out_channels or stride != 1
_UpperCamelCase = out_channels // reduction
_UpperCamelCase = (
ResNetShortCut(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) if should_apply_shortcut else nn.Identity()
)
_UpperCamelCase = nn.Sequential(
ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ ) , ResNetConvLayer(lowerCAmelCase__ , lowerCAmelCase__ , kernel_size=1 , activation=lowerCAmelCase__ ) , )
_UpperCamelCase = ACTaFN[activation]
def snake_case__ ( self : int , lowerCAmelCase__ : List[Any] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = hidden_state
_UpperCamelCase = self.layer(lowerCAmelCase__ )
_UpperCamelCase = self.shortcut(lowerCAmelCase__ )
hidden_state += residual
_UpperCamelCase = self.activation(lowerCAmelCase__ )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCAmelCase__ : ResNetConfig , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int = 2 , lowerCAmelCase__ : int = 2 , ) -> int:
'''simple docstring'''
super().__init__()
_UpperCamelCase = ResNetBottleNeckLayer if config.layer_type == '''bottleneck''' else ResNetBasicLayer
_UpperCamelCase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(lowerCAmelCase__ , lowerCAmelCase__ , stride=lowerCAmelCase__ , activation=config.hidden_act ) , *[layer(lowerCAmelCase__ , lowerCAmelCase__ , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
def snake_case__ ( self : List[Any] , lowerCAmelCase__ : Tensor ) -> Tensor:
'''simple docstring'''
_UpperCamelCase = input
for layer in self.layers:
_UpperCamelCase = layer(lowerCAmelCase__ )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , lowerCAmelCase__ : ResNetConfig ) -> List[Any]:
'''simple docstring'''
super().__init__()
_UpperCamelCase = nn.ModuleList([] )
# based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
self.stages.append(
ResNetStage(
lowerCAmelCase__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
_UpperCamelCase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowerCAmelCase__ , config.depths[1:] ):
self.stages.append(ResNetStage(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , depth=lowerCAmelCase__ ) )
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = True ) -> BaseModelOutputWithNoAttention:
'''simple docstring'''
_UpperCamelCase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
_UpperCamelCase = stage_module(lowerCAmelCase__ )
if output_hidden_states:
_UpperCamelCase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(
last_hidden_state=lowerCAmelCase__ , hidden_states=lowerCAmelCase__ , )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
_snake_case : Optional[int] = ResNetConfig
_snake_case : Union[str, Any] = 'resnet'
_snake_case : Optional[int] = 'pixel_values'
_snake_case : int = True
def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : List[str] ) -> Union[str, Any]:
'''simple docstring'''
if isinstance(lowerCAmelCase__ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(lowerCAmelCase__ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def snake_case__ ( self : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple=False ) -> List[str]:
'''simple docstring'''
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = value
lowercase__ : Optional[int] = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowercase__ : Any = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
'The bare ResNet model outputting raw features without any specific head on top.' , __magic_name__ , )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __init__( self : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> str:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
_UpperCamelCase = config
_UpperCamelCase = ResNetEmbeddings(lowerCAmelCase__ )
_UpperCamelCase = ResNetEncoder(lowerCAmelCase__ )
_UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
'''simple docstring'''
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.embedder(lowerCAmelCase__ )
_UpperCamelCase = self.encoder(
lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
_UpperCamelCase = encoder_outputs[0]
_UpperCamelCase = self.pooler(lowerCAmelCase__ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowerCAmelCase__ , pooler_output=lowerCAmelCase__ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'\n ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , __magic_name__ , )
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __init__( self : Optional[int] , lowerCAmelCase__ : Optional[int] ) -> Any:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
_UpperCamelCase = config.num_labels
_UpperCamelCase = ResNetModel(lowerCAmelCase__ )
# classification head
_UpperCamelCase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def snake_case__ ( self : int , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[torch.LongTensor] = None , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
'''simple docstring'''
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = self.resnet(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
_UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
_UpperCamelCase = self.classifier(lowerCAmelCase__ )
_UpperCamelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_UpperCamelCase = '''regression'''
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_UpperCamelCase = '''single_label_classification'''
else:
_UpperCamelCase = '''multi_label_classification'''
if self.config.problem_type == "regression":
_UpperCamelCase = MSELoss()
if self.num_labels == 1:
_UpperCamelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_UpperCamelCase = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config.problem_type == "single_label_classification":
_UpperCamelCase = CrossEntropyLoss()
_UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_UpperCamelCase = BCEWithLogitsLoss()
_UpperCamelCase = loss_fct(lowerCAmelCase__ , lowerCAmelCase__ )
if not return_dict:
_UpperCamelCase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCAmelCase__ , logits=lowerCAmelCase__ , hidden_states=outputs.hidden_states )
@add_start_docstrings(
'\n ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n ' , __magic_name__ , )
class __lowerCAmelCase ( __magic_name__ , __magic_name__ ):
"""simple docstring"""
def __init__( self : Tuple , lowerCAmelCase__ : Any ) -> Dict:
'''simple docstring'''
super().__init__(lowerCAmelCase__ )
super()._init_backbone(lowerCAmelCase__ )
_UpperCamelCase = [config.embedding_size] + config.hidden_sizes
_UpperCamelCase = ResNetEmbeddings(lowerCAmelCase__ )
_UpperCamelCase = ResNetEncoder(lowerCAmelCase__ )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCAmelCase__ )
@replace_return_docstrings(output_type=lowerCAmelCase__ , config_class=_CONFIG_FOR_DOC )
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Tensor , lowerCAmelCase__ : Optional[bool] = None , lowerCAmelCase__ : Optional[bool] = None ) -> BackboneOutput:
'''simple docstring'''
_UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCamelCase = self.embedder(lowerCAmelCase__ )
_UpperCamelCase = self.encoder(lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , return_dict=lowerCAmelCase__ )
_UpperCamelCase = outputs.hidden_states
_UpperCamelCase = ()
for idx, stage in enumerate(self.stage_names ):
if stage in self.out_features:
feature_maps += (hidden_states[idx],)
if not return_dict:
_UpperCamelCase = (feature_maps,)
if output_hidden_states:
output += (outputs.hidden_states,)
return output
return BackboneOutput(
feature_maps=lowerCAmelCase__ , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=lowerCAmelCase__ , )
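# Illustrative usage sketch (hypothetical checkpoint and stage names, not part
# of this module):
#   model = ResNetBackbone.from_pretrained("microsoft/resnet-50", out_features=["stage2", "stage4"])
#   feature_maps = model(pixel_values).feature_maps  # one tensor per requested stage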
| 324
| 1
|
"""simple docstring"""
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
lowerCamelCase : Dict = ''
for i in table:
res += inp[i - 1]
return res
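# Illustrative check (not in the original): apply_table is a 1-indexed
# permutation/selection, so apply_table('1010', [2, 4, 3, 1]) == '0011'.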
def UpperCAmelCase ( a_ ):
'''simple docstring'''
return data[1:] + data[0]
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
lowerCamelCase : Optional[int] = ''
for i in range(len(a_ ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
lowerCamelCase : Union[str, Any] = int('0b' + data[0] + data[-1], 2 )
lowerCamelCase : List[Any] = int('0b' + data[1:3], 2 )
return bin(s[row][col] )[2:]
def UpperCAmelCase ( a_, a_, a_, a_, a_ ):
'''simple docstring'''
lowerCamelCase : Dict = message[:4]
lowerCamelCase : str = message[4:]
lowerCamelCase : int = apply_table(a_, a_ )
lowerCamelCase : int = xor(a_, a_ )
lowerCamelCase : Optional[int] = apply_sbox(a_, temp[:4] ) # noqa: E741
lowerCamelCase : List[str] = apply_sbox(a_, temp[4:] )
lowerCamelCase : Optional[Any] = '0' * (2 - len(a_ )) + l # noqa: E741
lowerCamelCase : List[Any] = '0' * (2 - len(a_ )) + r
lowerCamelCase : str = apply_table(l + r, a_ )
lowerCamelCase : Tuple = xor(a_, a_ )
return temp + right
if __name__ == "__main__":
_A = input('Enter 10 bit key: ')
_A = input('Enter 8 bit message: ')
_A = [6, 3, 7, 4, 8, 5, 1_0, 9]
_A = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
_A = [2, 4, 3, 1]
_A = [2, 6, 3, 1, 4, 8, 5, 7]
_A = [4, 1, 3, 5, 7, 2, 8, 6]
_A = [4, 1, 2, 3, 2, 3, 4, 1]
_A = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_A = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_A = apply_table(key, paa_table)
_A = temp[:5]
_A = temp[5:]
_A = left_shift(left)
_A = left_shift(right)
_A = apply_table(left + right, pa_table)
_A = left_shift(left)
_A = left_shift(right)
_A = left_shift(left)
_A = left_shift(right)
_A = apply_table(left + right, pa_table)
# encryption
_A = apply_table(message, IP)
_A = function(expansion, sa, sa, keya, temp)
_A = temp[4:] + temp[:4]
_A = function(expansion, sa, sa, keya, temp)
_A = apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
_A = apply_table(CT, IP)
_A = function(expansion, sa, sa, keya, temp)
_A = temp[4:] + temp[:4]
_A = function(expansion, sa, sa, keya, temp)
_A = apply_table(temp, IP_inv)
    print('Plain text after decrypting is:', PT)
| 205
|
"""simple docstring"""
def UpperCAmelCase ( ):
'''simple docstring'''
return 1
def UpperCAmelCase ( a_ ):
'''simple docstring'''
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def UpperCAmelCase ( a_ ):
'''simple docstring'''
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(a_ )
def UpperCAmelCase ( a_ ):
'''simple docstring'''
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(a_ )
def UpperCAmelCase ( a_ ):
'''simple docstring'''
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(a_ )
def UpperCAmelCase ( a_ ):
'''simple docstring'''
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(a_ )
def UpperCAmelCase ( a_ ):
'''simple docstring'''
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(a_ )
def UpperCAmelCase ( a_ ):
'''simple docstring'''
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(a_ )
def UpperCAmelCase ( a_ = 200 ):
'''simple docstring'''
return two_pound(a_ )
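# Illustrative check (not in the original): solution(5) == 4, matching the
# four ways to make 5p from {1p, 2p, 5p}: 5; 2+2+1; 2+1+1+1; 1+1+1+1+1.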
if __name__ == "__main__":
print(solution(int(input().strip())))
| 205
| 1
|
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
a : Union[str, Any] = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
a : List[str] = 'sshleifer/student_marian_en_ro_6_1'
a : Dict = 'sshleifer/tiny-mbart'
@require_torch
class a ( _lowerCamelCase ):
def A_ ( self : int , lowercase_ : Any=False , lowercase_ : int=None , lowercase_ : List[str]=True , lowercase_ : Union[str, Any]=True , lowercase_ : Tuple=True , lowercase_ : str=True , ):
snake_case_ = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=lowercase_ , num_train_epochs=1 , distributed=lowercase_ , extra_args_str=lowercase_ , predict_with_generate=lowercase_ , do_train=lowercase_ , do_eval=lowercase_ , do_predict=lowercase_ , )
snake_case_ = TrainerState.load_from_json(os.path.join(lowercase_ , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
snake_case_ = [log for log in logs if '''eval_loss''' in log.keys()]
snake_case_ = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
snake_case_ = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , lowercase_ )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def A_ ( self : Any ):
self.run_seqaseq_quick()
@require_torch_multi_gpu
def A_ ( self : Optional[int] ):
self.run_seqaseq_quick(distributed=lowercase_ )
@require_torch_multi_gpu
def A_ ( self : str ):
self.run_seqaseq_quick(distributed=lowercase_ )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A_ ( self : int ):
self.run_seqaseq_quick(distributed=lowercase_ , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A_ ( self : str ):
self.run_seqaseq_quick(distributed=lowercase_ , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A_ ( self : Tuple ):
self.run_seqaseq_quick(distributed=lowercase_ , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=lowercase_ )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def A_ ( self : int ):
self.run_seqaseq_quick(
distributed=lowercase_ , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=lowercase_ )
@require_apex
@require_torch_gpu
def A_ ( self : int ):
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=lowercase_ , extra_args_str='''--fp16 --fp16_backend=apex''' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=lowercase_ , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def A_ ( self : Optional[int] , lowercase_ : Dict ):
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
snake_case_ = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
snake_case_ = experiments[experiment_id]
snake_case_ = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
snake_case_ = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**lowercase_ , extra_args_str=data['''extra_args_str'''] )
snake_case_ = len(re.findall(lowercase_ , cl.err ) )
self.assertEqual(lowercase_ , data['''n_matches'''] )
@slow
def A_ ( self : Optional[Any] ):
snake_case_ = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=lowercase_ , learning_rate=3e-4 , num_train_epochs=10 , distributed=lowercase_ , )
# Check metrics
snake_case_ = TrainerState.load_from_json(os.path.join(lowercase_ , '''trainer_state.json''' ) ).log_history
snake_case_ = [log for log in logs if '''eval_loss''' in log.keys()]
snake_case_ = eval_metrics[0]
snake_case_ = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , lowercase_ )
# test if do_predict saves generations and metrics
snake_case_ = os.listdir(lowercase_ )
snake_case_ = {os.path.basename(lowercase_ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def A_ ( self : Tuple ):
from transformers.training_args import OptimizerNames
def train_and_return_metrics(lowercase_ : str ) -> Tuple[int, float]:
snake_case_ = '''--skip_memory_metrics 0'''
snake_case_ = self.run_trainer(
max_len=128 , model_name=lowercase_ , learning_rate=3e-4 , num_train_epochs=1 , optim=lowercase_ , distributed=lowercase_ , extra_args_str=lowercase_ , do_eval=lowercase_ , do_predict=lowercase_ , n_gpus_to_use=1 , )
# Check metrics
snake_case_ = TrainerState.load_from_json(Path(lowercase_ , '''trainer_state.json''' ) ).log_history
snake_case_ = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 )
snake_case_ = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 )
snake_case_ = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
snake_case_ ,snake_case_ ,snake_case_ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
snake_case_ ,snake_case_ ,snake_case_ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
snake_case_ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
snake_case_ = gpu_peak_mem_orig + gpu_alloc_mem_orig
snake_case_ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
snake_case_ = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
        # After leaving a small margin to accommodate differences between GPUs, let's check
# that we have at least 120MB in savings
snake_case_ = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
lowercase_ , lowercase_ , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
F" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
F" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , )
self.assertGreater(
lowercase_ , lowercase_ , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
F" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
F" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , )
self.assertEqual(
lowercase_ , lowercase_ , F"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" )
def A_ ( self : int , lowercase_ : int , lowercase_ : str , lowercase_ : int , lowercase_ : float = 3e-3 , lowercase_ : str = "adafactor" , lowercase_ : bool = False , lowercase_ : str = None , lowercase_ : int = 0 , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : bool = True , lowercase_ : int = None , ):
snake_case_ = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
snake_case_ = self.get_auto_remove_tmp_dir()
snake_case_ = F"\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(lowercase_ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(lowercase_ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split()
snake_case_ = F"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(lowercase_ )}\n ".split()
snake_case_ = '''
--do_predict
'''.split()
snake_case_ = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += F"--optim {optim}".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
snake_case_ = get_gpu_count()
snake_case_ = get_torch_dist_unique_port()
snake_case_ = F"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split()
snake_case_ = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase_ , env=self.get_env() )
else:
snake_case_ = ['''run_translation.py'''] + args
with patch.object(lowercase_ , '''argv''' , lowercase_ ):
main()
return output_dir
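# Illustrative direct call (mirroring the usage earlier in this class): the
# helper assembles run_translation.py arguments, so something like
# self.run_trainer(eval_steps=1, max_len=12, model_name=MARIAN_MODEL,
# num_train_epochs=1, distributed=False) trains briefly and returns output_dir.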
| 56
|
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
_UpperCamelCase = None
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_UpperCamelCase = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
_UpperCamelCase = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
class _A ( __SCREAMING_SNAKE_CASE ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : str = ["input_ids", "attention_mask"]
_SCREAMING_SNAKE_CASE : Optional[Any] = TaTokenizer
_SCREAMING_SNAKE_CASE : List[int] = []
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="<pad>" , __UpperCAmelCase=100 , __UpperCAmelCase=None , **__UpperCAmelCase , ) -> List[Any]:
'''simple docstring'''
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__UpperCAmelCase : List[Any] = [f'<extra_id_{i}>' for i in range(__UpperCAmelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
__UpperCAmelCase : Any = len(set(filter(lambda __UpperCAmelCase : bool("""extra_id_""" in str(__UpperCAmelCase ) ) , __UpperCAmelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
""" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"""
""" tokens""" )
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , extra_ids=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , )
__UpperCAmelCase : Optional[int] = vocab_file
__UpperCAmelCase : Any = False if not self.vocab_file else True
__UpperCAmelCase : Optional[int] = extra_ids
@staticmethod
def __A ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
__UpperCAmelCase : int = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"""This tokenizer was incorrectly instantiated with a model max length of"""
f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
""" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"""
""" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"""
f' {pretrained_model_name_or_path} automatically truncating your input to'
f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
""" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"""
""" instantiate this tokenizer with `model_max_length` set to your preferred value.""" , __UpperCAmelCase , )
return max_model_length
def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCAmelCase : Any = os.path.join(
__UpperCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file , __UpperCAmelCase )
logger.info(f'Copy vocab file to {out_vocab_file}' )
return (out_vocab_file,)
def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
__UpperCAmelCase : str = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
__UpperCAmelCase : Optional[Any] = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def __A ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __A ( self ) -> Any:
'''simple docstring'''
return list(
set(filter(lambda __UpperCAmelCase : bool(re.search(r"""<extra_id_\d+>""" , __UpperCAmelCase ) ) is not None , self.additional_special_tokens ) ) )
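    # Illustrative note (not in the original): with the default extra_ids=100,
    # this returns the sentinel tokens '<extra_id_0>' through '<extra_id_99>'.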
def __A ( self ) -> Optional[int]:
'''simple docstring'''
return [self.convert_tokens_to_ids(__UpperCAmelCase ) for token in self.get_sentinel_tokens()]
| 254
| 0
|
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
lowercase : Any = ''''''
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
class UpperCAmelCase_ ( tr.AbstractTransform ):
'''simple docstring'''
def __init__( self , _SCREAMING_SNAKE_CASE = " " ) -> List[Any]:
snake_case_ : int = sentence_delimiter
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> int:
return list(_SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE ) -> Dict:
snake_case_ : Any = []
for sent_idx, sentence in enumerate(_SCREAMING_SNAKE_CASE ):
chars.extend(self.process_string(_SCREAMING_SNAKE_CASE ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(_SCREAMING_SNAKE_CASE ) - 1:
chars.append(self.sentence_delimiter )
return chars
lowercase : Optional[Any] = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
lowercase : str = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
lowercase : Optional[Any] = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
lowercase : Any = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system, with a CER of 0 being a perfect score.
'''
lowercase : Any = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
    predictions: list of transcriptions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def _lowerCAmelCase ( self ) -> Union[str, Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
"https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
] , )
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Optional[int]:
if concatenate_texts:
return jiwer.compute_measures(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , truth_transform=_SCREAMING_SNAKE_CASE , hypothesis_transform=_SCREAMING_SNAKE_CASE , )["wer"]
snake_case_ : Tuple = 0
snake_case_ : Optional[int] = 0
for prediction, reference in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
snake_case_ : Any = jiwer.compute_measures(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , truth_transform=_SCREAMING_SNAKE_CASE , hypothesis_transform=_SCREAMING_SNAKE_CASE , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 362
|
def lowerCAmelCase__ ( _a : int = 50 ):
snake_case_ : Union[str, Any] = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
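# Illustrative check: per the Project Euler 114 problem statement, a row of
# seven units admits exactly seventeen arrangements, so solution(7) == 17 here.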
if __name__ == "__main__":
print(F"""{solution() = }""")
| 36
| 0
|
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
a : Tuple =ComputeEnvironment.AMAZON_SAGEMAKER
a : str =True
a : List[Any] ="ml.p3.2xlarge"
a : List[Any] ="accelerate_sagemaker_execution_role"
a : str ="hf-sm"
a : List[Any] ="us-east-1"
a : Optional[int] =1
a : int ="accelerate-sagemaker-1"
a : Tuple ="1.6"
a : List[Any] ="4.4"
a : Dict ="train.py"
a : Dict =[
"--model_name_or_path",
"bert",
"--do_train",
"False",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
a : int =[
"--model_name_or_path",
"bert",
"--do_train",
"--do_test",
"False",
"--do_predict",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ):
"""simple docstring"""
lowerCAmelCase : Tuple = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args["model_name_or_path"] , snake_case__ )
assert isinstance(converted_args["do_train"] , snake_case__ )
assert isinstance(converted_args["epochs"] , snake_case__ )
assert isinstance(converted_args["learning_rate"] , snake_case__ )
assert isinstance(converted_args["max_steps"] , snake_case__ )
with pytest.raises(snake_case__ ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 108
|
from math import loga
def A_ ( A__ ) -> int:
if a < 0:
raise ValueError('Input value must be a positive integer' )
elif isinstance(A__ , A__ ):
raise TypeError('Input value must be a \'int\' type' )
return 0 if (a == 0) else int(loga(a & -a ) )
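# Illustrative checks (not in the original): the function returns the index of
# the lowest set bit, e.g. 36 == 0b100100 gives 2 and 8 == 0b1000 gives 3.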
if __name__ == "__main__":
import doctest
doctest.testmod()
| 99
| 0
|
"""simple docstring"""
def bubble_sort(list_data: list, length: int = 0) -> list:
    """Recursive bubble sort: each pass bubbles the largest remaining element
    to the end, then the function recurses on the shortened prefix."""
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data, length - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
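# Example usage:
#     bubble_sort([0, 5, 2, 3, 2])  ->  [0, 2, 2, 3, 5]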
| 360
|
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy", ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy", ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg")
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy", ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 95
| 0
|
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)
@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
        assert len(images_pil) == num_samples
    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1
    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False, steps_offset=1, )
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, scheduler=scheduler, safety_checker=None, )
        scheduler_state = scheduler.create_state()
        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1
    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None, )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True, )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
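# Note on the replicate/shard pattern used throughout these tests: `replicate`
# copies the parameter pytree onto every local device, while `shard` reshapes
# the leading batch axis into (num_devices, batch_per_device, ...) so the
# pmap-jitted pipeline runs one slice per device. With 8 devices and
# prompt_ids of shape (8, 77), shard(prompt_ids) has shape (8, 1, 77).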
| 327
|
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
_SCREAMING_SNAKE_CASE = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict, storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner(), )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
                        'initializing FSDP object')
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict, storage_reader=dist_cp.FileSystemReader(ckpt_dir), planner=DefaultLoadPlanner(), )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(ckpt_dir), planner=DefaultSavePlanner(), )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(), optimizer_key='optimizer', storage_reader=dist_cp.FileSystemReader(ckpt_dir), )
            optim_state = optim_state['optimizer']
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
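# Typical call pattern (a sketch; argument names follow the helpers above):
#     save_fsdp_model(fsdp_plugin, accelerator, model, output_dir)
#     save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir)
#     load_fsdp_model(fsdp_plugin, accelerator, model, input_dir)
#     load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir)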
| 327
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_sw3'] = ['GPTSw3Tokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 368
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    """
    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, sample_size)`):
            Hidden states output from the last layer of the model.
    """

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: Tuple[str] = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos)
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift)
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim, act_fn=act_fn, out_dim=block_out_channels[0], )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block, in_channels=input_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block, )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=layers_per_block, add_downsample=downsample_each_block, )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block, in_channels=prev_output_channel, out_channels=output_channel, temb_channels=block_out_channels[0], add_upsample=not is_final_block, )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out, embed_dim=block_out_channels[0], out_channels=out_channels, act_fn=act_fn, fc_dim=block_out_channels[-1] // 4, )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
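# Hedged usage sketch (the shapes and arguments here are assumptions, not
# from the source):
#     model = UNet1DModel(sample_size=256, in_channels=2, out_channels=2)
#     noise = torch.randn(1, 2, 256)
#     denoised = model(noise, timestep=10).sample  # expected shape (1, 2, 256)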
| 207
| 0
|
import qiskit
def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    """Build a quantum half adder circuit and sample its (carry, sum) outputs."""
    simulator = qiskit.Aer.get_backend('aer_simulator')

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)
if __name__ == "__main__":
    counts = half_adder(1, 1)
print(f"Half Adder Output Qubit Counts: {counts}")
| 205
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    'configuration_pix2struct': [
        'PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'Pix2StructConfig',
        'Pix2StructTextConfig',
        'Pix2StructVisionConfig',
    ],
    'processing_pix2struct': ['Pix2StructProcessor'],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['image_processing_pix2struct'] = ['Pix2StructImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_pix2struct'] = [
        'PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Pix2StructPreTrainedModel',
        'Pix2StructForConditionalGeneration',
        'Pix2StructVisionModel',
        'Pix2StructTextModel',
    ]

if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_pix2struct import Pix2StructImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 205
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    """configuration_rembert""": ["""REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RemBertConfig""", """RemBertOnnxConfig"""]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Any = ["""RemBertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Union[str, Any] = ["""RemBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_rembert"""] = [
"""REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RemBertForCausalLM""",
"""RemBertForMaskedLM""",
"""RemBertForMultipleChoice""",
"""RemBertForQuestionAnswering""",
"""RemBertForSequenceClassification""",
"""RemBertForTokenClassification""",
"""RemBertLayer""",
"""RemBertModel""",
"""RemBertPreTrainedModel""",
"""load_tf_weights_in_rembert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_rembert"""] = [
"""TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRemBertForCausalLM""",
"""TFRemBertForMaskedLM""",
"""TFRemBertForMultipleChoice""",
"""TFRemBertForQuestionAnswering""",
"""TFRemBertForSequenceClassification""",
"""TFRemBertForTokenClassification""",
"""TFRemBertLayer""",
"""TFRemBertModel""",
"""TFRemBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 369
|
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("""A""" + str(i), end=""" """)
    else:
        print("""(""", end=""" """)
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(""")""", end=""" """)


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("""No. of Operation required: """ + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
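# For the dimensions above (the classic CLRS example) the minimum cost is
# 15125 scalar multiplications and the printed parenthesisation is
# ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ).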
| 46
| 0
|
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """Knuth-Morris-Pratt substring search: return True if `pattern`
    occurs in `text`."""

    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list:
    """Compute the KMP failure function for `pattern`."""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
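# failure[k] is the length of the longest proper prefix of pattern[: k + 1]
# that is also a suffix of it; on a mismatch at position j the search resumes
# from failure[j - 1] instead of restarting, which makes the scan
# O(len(text) + len(pattern)).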
if __name__ == "__main__":
    # Test 1)
    pattern = """abc1abc12"""
    text1 = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
    text2 = """alskfjaldsk23adsfabcabc"""
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = """ABABX"""
    text = """ABABZABABYABABX"""
    assert kmp(pattern, text)

    # Test 3)
    pattern = """AAAB"""
    text = """ABAAAAAB"""
    assert kmp(pattern, text)

    # Test 4)
    pattern = """abcdabcy"""
    text = """abcxabcdabxabcdabcdabcy"""
    assert kmp(pattern, text)

    # Test 5)
    pattern = """aabaabaaa"""
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
| 20
|
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class ProcessorGradientFlow:
    """Differentiable wrapper around the CLIP processor: torchvision
    transforms keep gradients flowing to the image, unlike the stock
    feature extractor."""

    def __init__(self, device="cpu", clip_model="openai/clip-vit-large-patch14"):
        self.device = device
        self.tokenizer = CLIPTokenizerFast.from_pretrained(clip_model)
        self.image_mean = [0.48_145_466, 0.4_578_275, 0.40_821_073]
        self.image_std = [0.26_862_954, 0.26_130_258, 0.27_577_711]
        self.normalize = torchvision.transforms.Normalize(self.image_mean, self.image_std)
        self.resize = torchvision.transforms.Resize(224)
        self.center_crop = torchvision.transforms.CenterCrop(224)

    def preprocess_img(self, images):
        images = self.resize(images)
        images = self.center_crop(images)
        images = self.normalize(images)
        return images

    def __call__(self, text=None, images=None, **kwargs):
        encoding = self.tokenizer(text=text, **kwargs)
        encoding["pixel_values"] = self.preprocess_img(images)
        encoding = {key: value.to(self.device) for (key, value) in encoding.items()}
        return encoding


class VQGAN_CLIP(nn.Module):
    def __init__(
        self,
        iterations=10,
        lr=0.01,
        vqgan=None,
        vqgan_config=None,
        vqgan_checkpoint=None,
        clip=None,
        clip_preprocessor=None,
        device=None,
        log=False,
        save_vector=True,
        return_val="image",
        quantize=True,
        save_intermediate=False,
        show_intermediate=False,
        make_grid=False,
    ):
        super().__init__()
        self.latent = None
        self.device = device if device else get_device()
        if vqgan:
            self.vqgan = vqgan
        else:
            self.vqgan = load_vqgan(self.device, conf_path=vqgan_config, ckpt_path=vqgan_checkpoint)
        self.vqgan.eval()
        if clip:
            self.clip = clip
        else:
            self.clip = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip.to(self.device)
        self.clip_preprocessor = ProcessorGradientFlow(device=self.device)

        self.iterations = iterations
        self.lr = lr
        self.log = log
        self.make_grid = make_grid
        self.return_val = return_val
        self.quantize = quantize
        self.latent_dim = self.vqgan.decoder.z_shape

    def make_animation(self, input_path=None, output_path=None, total_duration=5, extend_frames=True):
        images = []
        if output_path is None:
            output_path = "./animation.gif"
        if input_path is None:
            input_path = self.save_path
        paths = sorted(glob(input_path + "/*"))
        if not len(paths):
            raise ValueError(
                "No images found in save path, aborting (did you pass save_intermediate=True to the generate"
                " function?)")
        if len(paths) == 1:
            print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)")
        frame_duration = total_duration / len(paths)
        durations = [frame_duration] * len(paths)
        if extend_frames:
            durations[0] = 1.5
            durations[-1] = 3
        for file_name in paths:
            if file_name.endswith(".png"):
                images.append(imageio.imread(file_name))
        imageio.mimsave(output_path, images, duration=durations)
        print(f"gif saved to {output_path}")

    def _get_latent(self, path=None, img=None):
        if not (path or img):
            raise ValueError("Input either path or tensor")
        if img is not None:
            raise NotImplementedError
        x = preprocess(Image.open(path), target_image_size=256).to(self.device)
        x_processed = preprocess_vqgan(x)
        z, *_ = self.vqgan.encode(x_processed)
        return z

    def _add_vector(self, transform_vector):
        """Add a transform vector to the (frozen) latent and decode the result."""
        base_latent = self.latent.detach().requires_grad_()
        trans_latent = base_latent + transform_vector
        if self.quantize:
            z_q, *_ = self.vqgan.quantize(trans_latent)
        else:
            z_q = trans_latent
        return self.vqgan.decode(z_q)

    def _get_clip_similarity(self, prompts, image, weights=None):
        clip_inputs = self.clip_preprocessor(text=prompts, images=image, return_tensors="pt", padding=True)
        clip_outputs = self.clip(**clip_inputs)
        similarity_logits = clip_outputs.logits_per_image
        if weights is not None:
            similarity_logits = similarity_logits * weights
        return similarity_logits.sum()

    def _get_CLIP_loss(self, pos_prompts, neg_prompts, image):
        pos_logits = self._get_clip_similarity(pos_prompts["prompts"], image, weights=(1 / pos_prompts["weights"]))
        if neg_prompts:
            neg_logits = self._get_clip_similarity(neg_prompts["prompts"], image, weights=neg_prompts["weights"])
        else:
            neg_logits = torch.tensor([1], device=self.device)
        loss = -torch.log(pos_logits) + torch.log(neg_logits)
        return loss

    def _optimize_CLIP(self, original_img, pos_prompts, neg_prompts):
        vector = torch.randn_like(self.latent, requires_grad=True, device=self.device)
        optim = torch.optim.Adam([vector], lr=self.lr)

        for i in range(self.iterations):
            optim.zero_grad()
            transformed_img = self._add_vector(vector)
            processed_img = loop_post_process(transformed_img)
            clip_loss = self._get_CLIP_loss(pos_prompts, neg_prompts, processed_img)
            print("CLIP loss", clip_loss)
            if self.log:
                wandb.log({"CLIP Loss": clip_loss})
            clip_loss.backward(retain_graph=True)
            optim.step()
            if self.return_val == "image":
                yield custom_to_pil(transformed_img[0])
            else:
                yield vector

    def _init_logging(self, positive_prompts, negative_prompts, image_path):
        wandb.init(reinit=True, project="face-editor")
        wandb.config.update({"Positive Prompts": positive_prompts})
        wandb.config.update({"Negative Prompts": negative_prompts})
        wandb.config.update({"lr": self.lr, "iterations": self.iterations})
        if image_path:
            image = Image.open(image_path)
            image = image.resize((256, 256))
            wandb.log("Original Image", wandb.Image(image))

    def process_prompts(self, prompts):
        if not prompts:
            return []
        processed_prompts = []
        weights = []
        if isinstance(prompts, str):
            prompts = [prompt.strip() for prompt in prompts.split("|")]
        for prompt in prompts:
            if isinstance(prompt, (tuple, list)):
                processed_prompt = prompt[0]
                weight = float(prompt[1])
            elif ":" in prompt:
                processed_prompt, weight = prompt.split(":")
                weight = float(weight)
            else:
                processed_prompt = prompt
                weight = 1.0
            processed_prompts.append(processed_prompt)
            weights.append(weight)
        return {
            "prompts": processed_prompts,
            "weights": torch.tensor(weights, device=self.device),
        }

    def generate(
        self,
        pos_prompts,
        neg_prompts=None,
        image_path=None,
        show_intermediate=True,
        save_intermediate=False,
        show_final=True,
        save_final=True,
        save_path=None,
    ):
        """Generate an image by optimizing a latent offset against the prompts."""
        if image_path:
            self.latent = self._get_latent(image_path)
        else:
            self.latent = torch.randn(self.latent_dim, device=self.device)
        if self.log:
            self._init_logging(pos_prompts, neg_prompts, image_path)

        assert pos_prompts, "You must provide at least one positive prompt."
        pos_prompts = self.process_prompts(pos_prompts)
        neg_prompts = self.process_prompts(neg_prompts)
        if save_final and save_path is None:
            save_path = os.path.join("./outputs/", "_".join(pos_prompts["prompts"]))
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            else:
                save_path = save_path + "_" + get_timestamp()
                os.makedirs(save_path)
            self.save_path = save_path

        original_img = self.vqgan.decode(self.latent)[0]
        if show_intermediate:
            print("Original Image")
            show_pil(custom_to_pil(original_img))

        original_img = loop_post_process(original_img)
        for iter, transformed_img in enumerate(self._optimize_CLIP(original_img, pos_prompts, neg_prompts)):
            if show_intermediate:
                show_pil(transformed_img)
            if save_intermediate:
                transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}.png"))
            if self.log:
                wandb.log({"Image": wandb.Image(transformed_img)})
        if show_final:
            show_pil(transformed_img)
        if save_final:
            transformed_img.save(os.path.join(self.save_path, f"iter_{iter:03d}_final.png"))
| 36
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_UpperCAmelCase : List[str] = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128_100,
        hidden_size=1_536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6_144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if type(pos_att_type) == str:
            pos_att_type = [x.strip() for x in pos_att_type.lower().split('|')]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get('pooler_hidden_size', hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)])
        else:
            return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework=None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer=None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
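# Note on the ONNX inputs above: DeBERTa-v2 configs default to
# type_vocab_size = 0, so both `inputs` and `generate_dummy_inputs` drop
# `token_type_ids` unless a non-zero type vocabulary is configured.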
| 110
|
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation('swish')

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation('silu')

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation('mish')

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation('gelu')

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
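# "swish" and "silu" are two names for the same function x * sigmoid(x), so
# both resolve to nn.SiLU. The boundary assertions hold because SiLU (like
# Mish and GELU) saturates to 0 for large negative inputs, passes 0 through
# exactly, and is approximately the identity for large positive inputs.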
| 110
| 1
|
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Rotate an image via the affine transform mapping the points `pt1` onto `pt2`."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / """image_data""" / """lena.jpg""")
    )
    # turn image into gray scale values
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["""Original""", """Rotation 1""", """Rotation 2""", """Rotation 3"""]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, """gray""")
        plt.title(titles[i])
        plt.axis("""off""")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
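# Background note (from the OpenCV docs, stated here as a reminder rather
# than source text): cv2.getAffineTransform solves for the 2x3 matrix M that
# maps the three source points onto the three destination points, and
# cv2.warpAffine then applies dst(x, y) = src(M11*x + M12*y + M13,
# M21*x + M22*y + M23) pixel by pixel.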
| 92
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : Dict = logging.get_logger(__name__)
UpperCAmelCase : Tuple = {
"""caidas/swin2sr-classicalsr-x2-64""": (
"""https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = """swin2sr"""

    attribute_map = {
        """hidden_size""": """embed_dim""",
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 95
| 0
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
_lowerCamelCase : Optional[int] = get_logger()
_lowerCamelCase : Optional[dict] = None
class __UpperCAmelCase ( TensorFormatter[Mapping, '''jax.Array''', Mapping] ):
'''simple docstring'''
def __init__(self : Any , _lowerCAmelCase : Optional[int]=None , _lowerCAmelCase : Union[str, Any]=None , **_lowerCAmelCase : List[Any] ):
super().__init__(features=_lowerCAmelCase )
import jax
from jaxlib.xla_client import Device
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError(
F"""Expected {device} to be a `str` not {type(_lowerCAmelCase )}, as `jaxlib.xla_extension.Device` """
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
A = device if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
A = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"""Device with string identifier {self.device} not listed among the available """
F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
F"""device: {str(jax.devices()[0] )}.""" )
A = str(jax.devices()[0] )
A = jnp_array_kwargs
@staticmethod
def A ():
import jax
return {str(_lowerCAmelCase ): device for device in jax.devices()}
def A (self : Tuple , _lowerCAmelCase : List[Any] ):
import jax
import jax.numpy as jnp
if isinstance(_lowerCAmelCase , _lowerCAmelCase ) and column:
if all(
isinstance(_lowerCAmelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(_lowerCAmelCase , axis=0 )
return column
def A (self : Tuple , _lowerCAmelCase : str ):
import jax
import jax.numpy as jnp
if isinstance(_lowerCAmelCase , (str, bytes, type(_lowerCAmelCase )) ):
return value
elif isinstance(_lowerCAmelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
A = {}
if isinstance(_lowerCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
A = {"""dtype""": jnp.intaa}
else:
A = {"""dtype""": jnp.intaa}
elif isinstance(_lowerCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
A = {"""dtype""": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(_lowerCAmelCase , PIL.Image.Image ):
A = np.asarray(_lowerCAmelCase )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
A = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(_lowerCAmelCase , **{**default_dtype, **self.jnp_array_kwargs} )
def A (self : Union[str, Any] , _lowerCAmelCase : Dict ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(_lowerCAmelCase , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(_lowerCAmelCase , """__array__""" ) and not isinstance(_lowerCAmelCase , jax.Array ):
A = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(_lowerCAmelCase , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(_lowerCAmelCase ) for substruct in data_struct] )
elif isinstance(_lowerCAmelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(_lowerCAmelCase ) for substruct in data_struct] )
return self._tensorize(_lowerCAmelCase )
def A (self : List[str] , _lowerCAmelCase : dict ):
return map_nested(self._recursive_tensorize , _lowerCAmelCase , map_list=_lowerCAmelCase )
def A (self : str , _lowerCAmelCase : pa.Table ):
A = self.numpy_arrow_extractor().extract_row(_lowerCAmelCase )
A = self.python_features_decoder.decode_row(_lowerCAmelCase )
return self.recursive_tensorize(_lowerCAmelCase )
def A (self : Tuple , _lowerCAmelCase : pa.Table ):
A = self.numpy_arrow_extractor().extract_column(_lowerCAmelCase )
A = self.python_features_decoder.decode_column(_lowerCAmelCase , pa_table.column_names[0] )
A = self.recursive_tensorize(_lowerCAmelCase )
A = self._consolidate(_lowerCAmelCase )
return column
def A (self : Tuple , _lowerCAmelCase : pa.Table ):
A = self.numpy_arrow_extractor().extract_batch(_lowerCAmelCase )
A = self.python_features_decoder.decode_batch(_lowerCAmelCase )
A = self.recursive_tensorize(_lowerCAmelCase )
for column_name in batch:
batch[column_name] = self._consolidate(batch[column_name] )
return batch
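# --- usage sketch (hedged): this looks like the `datasets` JAX formatter, which backs
# Dataset.with_format("jax"); rows, columns, and batches then come back as jnp arrays.
# Assumes `datasets` and `jax` are installed.
from datasets import Dataset
ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
print(ds[0]["x"])  # jnp array of shape (2,)
print(ds["x"])     # equal-shape/dtype columns are stacked into a (2, 2) jnp array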
| 337
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
def __a ( UpperCAmelCase ) ->List[int]:
"""simple docstring"""
if isinstance(UpperCAmelCase , np.ndarray ):
return list(tensor.shape )
A = tf.shape(UpperCAmelCase )
if tensor.shape == tf.TensorShape(UpperCAmelCase ):
return dynamic
A = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(UpperCAmelCase )]
def __a ( UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None ) ->tf.Tensor:
"""simple docstring"""
return tf.nn.softmax(logits=logits + 1E-9 , axis=UpperCAmelCase , name=UpperCAmelCase )
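# --- note (hedged): the 1e-9 shift mirrors transformers' `stable_softmax`, reportedly a
# workaround for an XLA-on-CPU softmax bug; the epsilon is far below float32 resolution
# at typical logit scales, so the result is numerically unchanged in practice.
# e.g. softmax([1, 2, 3]) ~ [0.0900, 0.2447, 0.6652] with or without the shift.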
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=1E-5 , UpperCAmelCase=-1 ) ->str:
"""simple docstring"""
if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(UpperCAmelCase , UpperCAmelCase ):
raise NotImplementedError("""Only 1D weight and bias tensors are supported for now, with only a single axis.""" )
# Get mean and variance on the axis to be normalized
A , A = tf.nn.moments(UpperCAmelCase , axes=[axis] , keepdims=UpperCAmelCase )
if axis != -1:
# Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
# on every dimension except axis
A = [1] * inputs.shape.rank
A = shape_list(UpperCAmelCase )[axis]
A = tf.reshape(UpperCAmelCase , UpperCAmelCase )
A = tf.reshape(UpperCAmelCase , UpperCAmelCase )
# Compute layer normalization using the batch_normalization
# function.
A = tf.nn.batch_normalization(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , offset=UpperCAmelCase , scale=UpperCAmelCase , variance_epsilon=UpperCAmelCase , )
return outputs
def __a ( UpperCAmelCase , UpperCAmelCase=0 , UpperCAmelCase=-1 ) ->int:
"""simple docstring"""
if end_dim < 0:
end_dim += input.shape.rank
if start_dim < 0:
start_dim += input.shape.rank
if start_dim == end_dim:
return input
A = tf.shape(UpperCAmelCase )
A = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] )
A = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 )
return tf.reshape(UpperCAmelCase , UpperCAmelCase )
def __a ( UpperCAmelCase ) ->tf.Tensor:
"""simple docstring"""
if not isinstance(UpperCAmelCase , tf.Tensor ):
A = tf.convert_to_tensor(UpperCAmelCase ) # Catches stray NumPy inputs
if encoder_attention_mask.shape.rank == 3:
A = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.shape.rank == 2:
A = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
A = (
tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask
) * encoder_extended_attention_mask.dtype.min
return encoder_extended_attention_mask
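# --- note (sketch): the function above turns a {0,1} encoder attention mask into an
# additive bias broadcastable over (batch, heads, query_len, key_len): kept positions
# map to 0 and masked positions to dtype.min, so they vanish after softmax.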
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = "input_ids" ) ->None:
"""simple docstring"""
tf.debugging.assert_less(
UpperCAmelCase , tf.cast(UpperCAmelCase , dtype=tensor.dtype ) , message=(
f"""The maximum value of {tensor_name} ({tf.math.reduce_max(UpperCAmelCase )}) must be smaller than the embedding """
f"""layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."""
) , )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
A = 64512
# Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
# because in that case even chunking the array would not make the saving
# possible.
A = [x for x in data if len(UpperCAmelCase ) > HDF5_OBJECT_HEADER_LIMIT]
# Expecting this to never be true.
if bad_attributes:
raise RuntimeError(
"""The following attributes cannot be saved to HDF5 file because """
f"""they are larger than {HDF5_OBJECT_HEADER_LIMIT} """
f"""bytes: {bad_attributes}""" )
A = np.asarray(UpperCAmelCase )
A = 1
A = np.array_split(UpperCAmelCase , UpperCAmelCase )
# This will never loop forever thanks to the test above.
while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ):
num_chunks += 1
A = np.array_split(UpperCAmelCase , UpperCAmelCase )
if num_chunks > 1:
for chunk_id, chunk_data in enumerate(UpperCAmelCase ):
A = chunk_data
else:
A = data
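# --- note (sketch): HDF5 caps an object's header at about 64 KiB, so attribute lists
# whose serialized size exceeds HDF5_OBJECT_HEADER_LIMIT are split into numbered chunks
# ("name0", "name1", ...); the companion loader below reassembles them in order.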
def __a ( UpperCAmelCase , UpperCAmelCase ) ->int:
"""simple docstring"""
if name in group.attrs:
A = [n.decode("""utf8""" ) if hasattr(UpperCAmelCase , """decode""" ) else n for n in group.attrs[name]]
else:
A = []
A = 0
while "%s%d" % (name, chunk_id) in group.attrs:
data.extend(
[n.decode("""utf8""" ) if hasattr(UpperCAmelCase , """decode""" ) else n for n in group.attrs["""%s%d""" % (name, chunk_id)]] )
chunk_id += 1
return data
def __a ( UpperCAmelCase ) ->Optional[Any]:
"""simple docstring"""
def _expand_single_ad_tensor(UpperCAmelCase ):
if isinstance(UpperCAmelCase , tf.Tensor ) and t.shape.rank == 1:
return tf.expand_dims(UpperCAmelCase , axis=-1 )
return t
return tf.nest.map_structure(_expand_single_ad_tensor , UpperCAmelCase )
| 337
| 1
|
"""simple docstring"""
from math import pi, sqrt, tan
def _snake_case ( lowercase__ ):
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def _snake_case ( lowercase__ ):
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def _snake_case ( lowercase__ ):
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def _snake_case ( lowercase__ , lowercase__ ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def _snake_case ( radius_1 , radius_2 , height ):
if radius_1 < 0 or radius_2 < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
_lowerCamelCase : Union[str, Any] = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)
def _snake_case ( lowercase__ , lowercase__ ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def _snake_case ( lowercase__ , lowercase__ ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(pi , 2 ) * torus_radius * tube_radius
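# --- worked check (sketch): a torus surface is 4 * pi**2 * R * r; with R=20, r=10 this
# gives 800 * pi**2 ~ 7895.68, matching the surface_area_torus(20, 10) demo call below.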
def _snake_case ( lowercase__ , lowercase__ ):
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def _snake_case ( lowercase__ ):
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def _snake_case ( lowercase__ , lowercase__ ):
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def _snake_case ( side_1 , side_2 , side_3 ):
if side_1 < 0 or side_2 < 0 or side_3 < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif side_1 + side_2 < side_3 or side_1 + side_3 < side_2 or side_2 + side_3 < side_1:
raise ValueError('Given three sides do not form a triangle' )
_lowerCamelCase : Any = (side_1 + side_2 + side_3) / 2
_lowerCamelCase : str = sqrt(
semi_perimeter
* (semi_perimeter - side_1)
* (semi_perimeter - side_2)
* (semi_perimeter - side_3) )
return area
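# --- worked check (sketch): for the 5-12-13 right triangle, semi_perimeter = 15 and
# area = sqrt(15 * 10 * 3 * 2) = sqrt(900) = 30.0, agreeing with base * height / 2 = 30.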
def _snake_case ( lowercase__ , lowercase__ ):
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def _snake_case ( base_1 , base_2 , height ):
if base_1 < 0 or base_2 < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (base_1 + base_2) * height
def _snake_case ( lowercase__ ):
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def _snake_case ( lowercase__ , lowercase__ ):
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def _snake_case ( diagonal_1 , diagonal_2 ):
if diagonal_1 < 0 or diagonal_2 < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_1 * diagonal_2
def _snake_case ( lowercase__ , lowercase__ ):
if not isinstance(sides , int ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \\nequal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \\nlength of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("""[DEMO] Areas of various geometric shapes: \n""")
print(F"Rectangle: {area_rectangle(10, 20) = }")
print(F"Square: {area_square(10) = }")
print(F"Triangle: {area_triangle(10, 10) = }")
print(F"Triangle: {area_triangle_three_sides(5, 12, 13) = }")
print(F"Parallelogram: {area_parallelogram(10, 20) = }")
print(F"Rhombus: {area_rhombus(10, 20) = }")
print(F"Trapezium: {area_trapezium(10, 20, 30) = }")
print(F"Circle: {area_circle(20) = }")
print(F"Ellipse: {area_ellipse(10, 20) = }")
print("""\nSurface Areas of various geometric shapes: \n""")
print(F"Cube: {surface_area_cube(20) = }")
print(F"Cuboid: {surface_area_cuboid(10, 20, 30) = }")
print(F"Sphere: {surface_area_sphere(20) = }")
print(F"Hemisphere: {surface_area_hemisphere(20) = }")
print(F"Cone: {surface_area_cone(10, 20) = }")
print(F"Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }")
print(F"Cylinder: {surface_area_cylinder(10, 20) = }")
print(F"Torus: {surface_area_torus(20, 10) = }")
print(F"Equilateral Triangle: {area_reg_polygon(3, 10) = }")
print(F"Square: {area_reg_polygon(4, 10) = }")
print(F"Reqular Pentagon: {area_reg_polygon(5, 10) = }")
| 96
|
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase, '''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(lowerCamelCase, '''num_attention_heads''' ) )
class _UpperCAmelCase :
"""simple docstring"""
def __init__( self : Tuple, lowerCamelCase : str, lowerCamelCase : str=13, lowerCamelCase : Union[str, Any]=64, lowerCamelCase : str=3, lowerCamelCase : int=3, lowerCamelCase : Dict=2, lowerCamelCase : int=1, lowerCamelCase : Optional[Any]=16, lowerCamelCase : Dict=[128, 256, 384], lowerCamelCase : Tuple=[4, 6, 8], lowerCamelCase : Optional[Any]=[2, 3, 4], lowerCamelCase : str=[16, 16, 16], lowerCamelCase : Dict=0, lowerCamelCase : List[str]=[2, 2, 2], lowerCamelCase : str=[2, 2, 2], lowerCamelCase : List[Any]=0.02, lowerCamelCase : Any=True, lowerCamelCase : Tuple=True, lowerCamelCase : Optional[Any]=2, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = num_channels
lowercase__ = kernel_size
lowercase__ = stride
lowercase__ = padding
lowercase__ = hidden_sizes
lowercase__ = num_attention_heads
lowercase__ = depths
lowercase__ = key_dim
lowercase__ = drop_path_rate
lowercase__ = patch_size
lowercase__ = attention_ratio
lowercase__ = mlp_ratio
lowercase__ = initializer_range
lowercase__ = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = num_labels
lowercase__ = initializer_range
def lowercase__ ( self : Tuple ):
'''simple docstring'''
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.num_labels )
lowercase__ = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : List[str] ):
'''simple docstring'''
return LevitConfig(
image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim, drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio, initializer_range=self.initializer_range, down_ops=self.down_ops, )
def lowercase__ ( self : Any, lowerCamelCase : List[Any], lowerCamelCase : int, lowerCamelCase : int ):
'''simple docstring'''
lowercase__ = LevitModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase )
lowercase__ = (self.image_size, self.image_size)
lowercase__ , lowercase__ = image_size[0], image_size[1]
for _ in range(4 ):
lowercase__ = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
lowercase__ = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]), )
def lowercase__ ( self : Union[str, Any], lowerCamelCase : int, lowerCamelCase : List[Any], lowerCamelCase : List[Any] ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = LevitForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _UpperCAmelCase ( A__ ,A__ ,unittest.TestCase ):
"""simple docstring"""
lowercase__ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
lowercase__ = (
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = LevitModelTester(self )
lowercase__ = ConfigTester(self, config_class=lowerCamelCase, has_text_modality=lowerCamelCase, hidden_size=37 )
def lowercase__ ( self : str ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase__ ( self : Tuple ):
'''simple docstring'''
return
@unittest.skip(reason='''Levit does not use inputs_embeds''' )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not support input and output embeddings''' )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason='''Levit does not output attentions''' )
def lowercase__ ( self : Dict ):
'''simple docstring'''
pass
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCamelCase )
def lowercase__ ( self : Tuple ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase : Optional[int], lowerCamelCase : str, lowerCamelCase : Tuple ):
lowercase__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowercase__ = outputs.hidden_states
lowercase__ = len(self.model_tester.depths ) + 1
self.assertEqual(len(lowerCamelCase ), lowerCamelCase )
lowercase__ = (self.model_tester.image_size, self.model_tester.image_size)
lowercase__ , lowercase__ = image_size[0], image_size[1]
for _ in range(4 ):
lowercase__ = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
lowercase__ = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [
height * width,
self.model_tester.hidden_sizes[0],
], )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
pass
def lowercase__ ( self : Union[str, Any], lowerCamelCase : List[Any], lowerCamelCase : Any, lowerCamelCase : Any=False ):
'''simple docstring'''
lowercase__ = super()._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
def lowercase__ ( self : Optional[Any] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowerCamelCase )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
lowercase__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.train()
lowercase__ = self._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase )
lowercase__ = model(**lowerCamelCase ).loss
loss.backward()
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase__ = False
lowercase__ = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
lowercase__ = model_class(lowerCamelCase )
model.gradient_checkpointing_enable()
model.to(lowerCamelCase )
model.train()
lowercase__ = self._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase )
lowercase__ = model(**lowerCamelCase ).loss
loss.backward()
def lowercase__ ( self : List[str] ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = [
{'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
{'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowerCamelCase ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ):
lowercase__ = problem_type['''title''']
lowercase__ = problem_type['''num_labels''']
lowercase__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.train()
lowercase__ = self._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase )
if problem_type["num_labels"] > 1:
lowercase__ = inputs['''labels'''].unsqueeze(1 ).repeat(1, problem_type['''num_labels'''] )
lowercase__ = inputs['''labels'''].to(problem_type['''dtype'''] )
# This tests that we do not trigger the warning from PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCamelCase ) as warning_list:
lowercase__ = model(**lowerCamelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = LevitModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def a ( ):
'''simple docstring'''
lowercase__ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase__ ( self : int ):
'''simple docstring'''
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
lowercase__ = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
lowerCamelCase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
lowercase__ = model(**lowerCamelCase )
# verify the logits
lowercase__ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape, lowerCamelCase )
lowercase__ = torch.tensor([1.0448, -0.3745, -1.8317] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase, atol=1E-4 ) )
| 207
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
UpperCAmelCase : Tuple = None
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : Union[str, Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCAmelCase : List[str] = {
'vocab_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
},
'tokenizer_file': {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
},
}
UpperCAmelCase : int = {
'xlnet-base-cased': None,
'xlnet-large-cased': None,
}
UpperCAmelCase : Tuple = '▁'
# Segments (not really needed)
UpperCAmelCase : str = 0
UpperCAmelCase : Union[str, Any] = 1
UpperCAmelCase : Dict = 2
UpperCAmelCase : Optional[Any] = 3
UpperCAmelCase : int = 4
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = "left"
lowerCAmelCase__ = XLNetTokenizer
def __init__( self : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any]=None , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : List[Any]="<s>" , __SCREAMING_SNAKE_CASE : int="</s>" , __SCREAMING_SNAKE_CASE : Union[str, Any]="<unk>" , __SCREAMING_SNAKE_CASE : Tuple="<sep>" , __SCREAMING_SNAKE_CASE : Dict="<pad>" , __SCREAMING_SNAKE_CASE : Any="<cls>" , __SCREAMING_SNAKE_CASE : List[str]="<mask>" , __SCREAMING_SNAKE_CASE : Tuple=["<eop>", "<eod>"] , **__SCREAMING_SNAKE_CASE : str , ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
vocab_file=__SCREAMING_SNAKE_CASE , tokenizer_file=__SCREAMING_SNAKE_CASE , do_lower_case=__SCREAMING_SNAKE_CASE , remove_space=__SCREAMING_SNAKE_CASE , keep_accents=__SCREAMING_SNAKE_CASE , bos_token=__SCREAMING_SNAKE_CASE , eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , sep_token=__SCREAMING_SNAKE_CASE , pad_token=__SCREAMING_SNAKE_CASE , cls_token=__SCREAMING_SNAKE_CASE , mask_token=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = do_lower_case
__SCREAMING_SNAKE_CASE = remove_space
__SCREAMING_SNAKE_CASE = keep_accents
__SCREAMING_SNAKE_CASE = vocab_file
__SCREAMING_SNAKE_CASE = False if not self.vocab_file else True
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def UpperCAmelCase__ ( self : int , __SCREAMING_SNAKE_CASE : List[int] , __SCREAMING_SNAKE_CASE : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = [self.sep_token_id]
__SCREAMING_SNAKE_CASE = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__SCREAMING_SNAKE_CASE = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 331
|
'''simple docstring'''
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = len(a__ )
while cur > 1:
# Find the maximum number in arr
__SCREAMING_SNAKE_CASE = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
__SCREAMING_SNAKE_CASE = arr[mi::-1] + arr[mi + 1 : len(a__ )]
# Reverse whole list
__SCREAMING_SNAKE_CASE = arr[cur - 1 :: -1] + arr[cur : len(a__ )]
cur -= 1
return arr
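# --- note (sketch): each pass flips the current maximum to the front and then flips the
# whole unsorted prefix so it sinks into place: O(n^2) comparisons and at most ~2n flips.
# e.g. pancake_sort([3, 1, 2]) -> [1, 2, 3], as exercised by the prompt below.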
if __name__ == "__main__":
UpperCAmelCase : Tuple = input('Enter numbers separated by a comma:\n').strip()
UpperCAmelCase : str = [int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
| 331
| 1
|
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class __lowerCAmelCase ( nn.Module ):
def __init__( self : Optional[Any] , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : int=0.0 , snake_case__ : Optional[Any] = None , snake_case__ : str = "geglu" , snake_case__ : Tuple = None , snake_case__ : Tuple = False , snake_case__ : List[str] = False , snake_case__ : Any = False , snake_case__ : Union[str, Any] = False , snake_case__ : Optional[int] = True , snake_case__ : Tuple = "layer_norm" , snake_case__ : Any = False , ):
"""simple docstring"""
super().__init__()
_UpperCAmelCase = only_cross_attention
_UpperCAmelCase = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
_UpperCAmelCase = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
F"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
F""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
_UpperCAmelCase = AdaLayerNorm(snake_case__ , snake_case__ )
elif self.use_ada_layer_norm_zero:
_UpperCAmelCase = AdaLayerNormZero(snake_case__ , snake_case__ )
else:
_UpperCAmelCase = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ )
_UpperCAmelCase = Attention(
query_dim=snake_case__ , heads=snake_case__ , dim_head=snake_case__ , dropout=snake_case__ , bias=snake_case__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=snake_case__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
_UpperCAmelCase = (
AdaLayerNorm(snake_case__ , snake_case__ )
if self.use_ada_layer_norm
else nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ )
)
_UpperCAmelCase = Attention(
query_dim=snake_case__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=snake_case__ , dim_head=snake_case__ , dropout=snake_case__ , bias=snake_case__ , upcast_attention=snake_case__ , ) # is self-attn if encoder_hidden_states is none
else:
_UpperCAmelCase = None
_UpperCAmelCase = None
# 3. Feed-forward
_UpperCAmelCase = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ )
_UpperCAmelCase = FeedForward(snake_case__ , dropout=snake_case__ , activation_fn=snake_case__ , final_dropout=snake_case__ )
# let chunk size default to None
_UpperCAmelCase = None
_UpperCAmelCase = 0
def UpperCamelCase ( self : Optional[Any] , snake_case__ : List[Any] , snake_case__ : List[str] ):
"""simple docstring"""
_UpperCAmelCase = chunk_size
_UpperCAmelCase = dim
def UpperCamelCase ( self : Dict , snake_case__ : Tuple , snake_case__ : str = None , snake_case__ : str = None , snake_case__ : List[str] = None , snake_case__ : Dict = None , snake_case__ : Tuple = None , snake_case__ : Optional[int] = None , ):
"""simple docstring"""
if self.use_ada_layer_norm:
_UpperCAmelCase = self.norma(snake_case__ , snake_case__ )
elif self.use_ada_layer_norm_zero:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.norma(
snake_case__ , snake_case__ , snake_case__ , hidden_dtype=hidden_states.dtype )
else:
_UpperCAmelCase = self.norma(snake_case__ )
_UpperCAmelCase = cross_attention_kwargs if cross_attention_kwargs is not None else {}
_UpperCAmelCase = self.attna(
snake_case__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=snake_case__ , **snake_case__ , )
if self.use_ada_layer_norm_zero:
_UpperCAmelCase = gate_msa.unsqueeze(1 ) * attn_output
_UpperCAmelCase = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
_UpperCAmelCase = (
self.norma(snake_case__ , snake_case__ ) if self.use_ada_layer_norm else self.norma(snake_case__ )
)
_UpperCAmelCase = self.attna(
snake_case__ , encoder_hidden_states=snake_case__ , attention_mask=snake_case__ , **snake_case__ , )
_UpperCAmelCase = attn_output + hidden_states
# 3. Feed-forward
_UpperCAmelCase = self.norma(snake_case__ )
if self.use_ada_layer_norm_zero:
_UpperCAmelCase = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
F"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
_UpperCAmelCase = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
_UpperCAmelCase = torch.cat(
[self.ff(snake_case__ ) for hid_slice in norm_hidden_states.chunk(snake_case__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
_UpperCAmelCase = self.ff(snake_case__ )
if self.use_ada_layer_norm_zero:
_UpperCAmelCase = gate_mlp.unsqueeze(1 ) * ff_output
_UpperCAmelCase = ff_output + hidden_states
return hidden_states
class __lowerCAmelCase ( nn.Module ):
def __init__( self : Tuple , snake_case__ : List[str] , snake_case__ : Any = None , snake_case__ : Dict = 4 , snake_case__ : Tuple = 0.0 , snake_case__ : int = "geglu" , snake_case__ : Optional[int] = False , ):
"""simple docstring"""
super().__init__()
_UpperCAmelCase = int(dim * mult )
_UpperCAmelCase = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
_UpperCAmelCase = GELU(snake_case__ , snake_case__ )
if activation_fn == "gelu-approximate":
_UpperCAmelCase = GELU(snake_case__ , snake_case__ , approximate="tanh" )
elif activation_fn == "geglu":
_UpperCAmelCase = GEGLU(snake_case__ , snake_case__ )
elif activation_fn == "geglu-approximate":
_UpperCAmelCase = ApproximateGELU(snake_case__ , snake_case__ )
_UpperCAmelCase = nn.ModuleList([] )
# project in
self.net.append(snake_case__ )
# project dropout
self.net.append(nn.Dropout(snake_case__ ) )
# project out
self.net.append(nn.Linear(snake_case__ , snake_case__ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(snake_case__ ) )
def UpperCamelCase ( self : List[str] , snake_case__ : Tuple ):
"""simple docstring"""
for module in self.net:
_UpperCAmelCase = module(snake_case__ )
return hidden_states
class __lowerCAmelCase ( nn.Module ):
def __init__( self : Any , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Union[str, Any] = "none" ):
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.Linear(snake_case__ , snake_case__ )
_UpperCAmelCase = approximate
def UpperCamelCase ( self : Optional[int] , snake_case__ : List[Any] ):
"""simple docstring"""
if gate.device.type != "mps":
return F.gelu(snake_case__ , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.float32 ) , approximate=self.approximate ).to(dtype=gate.dtype )
def UpperCamelCase ( self : int , snake_case__ : Any ):
"""simple docstring"""
_UpperCAmelCase = self.proj(snake_case__ )
_UpperCAmelCase = self.gelu(snake_case__ )
return hidden_states
class __lowerCAmelCase ( nn.Module ):
def __init__( self : Optional[int] , snake_case__ : int , snake_case__ : str ):
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.Linear(snake_case__ , dim_out * 2 )
def UpperCamelCase ( self : Tuple , snake_case__ : Optional[Any] ):
"""simple docstring"""
if gate.device.type != "mps":
return F.gelu(snake_case__ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.float32 ) ).to(dtype=gate.dtype )
def UpperCamelCase ( self : Dict , snake_case__ : Dict ):
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.proj(snake_case__ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(snake_case__ )
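# --- note (sketch): the chunk(2, dim=-1) above implements GEGLU: the projection output is
# split into (value, gate) halves and the block returns value * GELU(gate), per
# "GLU Variants Improve Transformer" (Shazeer, 2020).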
class __lowerCAmelCase ( nn.Module ):
def __init__( self : Optional[Any] , snake_case__ : Dict , snake_case__ : Union[str, Any] ):
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.Linear(snake_case__ , snake_case__ )
def UpperCamelCase ( self : Tuple , snake_case__ : Dict ):
"""simple docstring"""
_UpperCAmelCase = self.proj(snake_case__ )
return x * torch.sigmoid(1.702 * x )
class __lowerCAmelCase ( nn.Module ):
def __init__( self : List[str] , snake_case__ : Dict , snake_case__ : List[Any] ):
"""simple docstring"""
super().__init__()
_UpperCAmelCase = nn.Embedding(snake_case__ , snake_case__ )
_UpperCAmelCase = nn.SiLU()
_UpperCAmelCase = nn.Linear(snake_case__ , embedding_dim * 2 )
_UpperCAmelCase = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ )
def UpperCamelCase ( self : Any , snake_case__ : Optional[int] , snake_case__ : int ):
"""simple docstring"""
_UpperCAmelCase = self.linear(self.silu(self.emb(snake_case__ ) ) )
_UpperCAmelCase , _UpperCAmelCase = torch.chunk(snake_case__ , 2 )
_UpperCAmelCase = self.norm(snake_case__ ) * (1 + scale) + shift
return x
class __lowerCAmelCase ( nn.Module ):
def __init__( self : Any , snake_case__ : Dict , snake_case__ : str ):
"""simple docstring"""
super().__init__()
_UpperCAmelCase = CombinedTimestepLabelEmbeddings(snake_case__ , snake_case__ )
_UpperCAmelCase = nn.SiLU()
_UpperCAmelCase = nn.Linear(snake_case__ , 6 * embedding_dim , bias=snake_case__ )
_UpperCAmelCase = nn.LayerNorm(snake_case__ , elementwise_affine=snake_case__ , eps=1e-6 )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Any=None ):
"""simple docstring"""
_UpperCAmelCase = self.linear(self.silu(self.emb(snake_case__ , snake_case__ , hidden_dtype=snake_case__ ) ) )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = emb.chunk(6 , dim=1 )
_UpperCAmelCase = self.norm(snake_case__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class __lowerCAmelCase ( nn.Module ):
def __init__( self : Tuple , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Optional[int] = None , snake_case__ : Dict = 1e-5 ):
"""simple docstring"""
super().__init__()
_UpperCAmelCase = num_groups
_UpperCAmelCase = eps
if act_fn is None:
_UpperCAmelCase = None
else:
_UpperCAmelCase = get_activation(snake_case__ )
_UpperCAmelCase = nn.Linear(snake_case__ , out_dim * 2 )
def UpperCamelCase ( self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Optional[Any] ):
"""simple docstring"""
if self.act:
_UpperCAmelCase = self.act(snake_case__ )
_UpperCAmelCase = self.linear(snake_case__ )
_UpperCAmelCase = emb[:, :, None, None]
_UpperCAmelCase , _UpperCAmelCase = emb.chunk(2 , dim=1 )
_UpperCAmelCase = F.group_norm(snake_case__ , self.num_groups , eps=self.eps )
_UpperCAmelCase = x * (1 + scale) + shift
return x
| 133
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 'longformer'
def __init__( self , lowercase = 512 , lowercase = 2 , lowercase = 1 , lowercase = 0 , lowercase = 2 , lowercase = 30_522 , lowercase = 768 , lowercase = 12 , lowercase = 12 , lowercase = 3_072 , lowercase = "gelu" , lowercase = 0.1 , lowercase = 0.1 , lowercase = 512 , lowercase = 2 , lowercase = 0.02 , lowercase = 1e-12 , lowercase = False , **lowercase , ) -> Optional[int]:
super().__init__(pad_token_id=lowercase , **lowercase )
lowerCAmelCase = attention_window
lowerCAmelCase = sep_token_id
lowerCAmelCase = bos_token_id
lowerCAmelCase = eos_token_id
lowerCAmelCase = vocab_size
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = hidden_act
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = max_position_embeddings
lowerCAmelCase = type_vocab_size
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = onnx_export
class lowercase ( _UpperCAmelCase ):
def __init__( self , lowercase , lowercase = "default" , lowercase = None ) -> Tuple:
super().__init__(lowercase , lowercase , lowercase )
lowerCAmelCase = True
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowerCAmelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowerCAmelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""global_attention_mask""", dynamic_axis),
] )
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
lowerCAmelCase = super().outputs
if self.task == "default":
lowerCAmelCase = {0: """batch"""}
return outputs
@property
def _snake_case ( self ) -> float:
return 1e-4
@property
def _snake_case ( self ) -> int:
# needs to be >= 14 to support tril operator
return max(super().default_onnx_opset , 14 )
def _snake_case ( self , lowercase , lowercase = -1 , lowercase = -1 , lowercase = False , lowercase = None , ) -> Mapping[str, Any]:
lowerCAmelCase = super().generate_dummy_inputs(
preprocessor=lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
lowerCAmelCase = torch.zeros_like(inputs["""input_ids"""] )
# make every second token global
inputs["""global_attention_mask"""][:, ::2] = 1
return inputs
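# --- usage sketch (hedged; assumes the legacy `transformers.onnx` exporter, since
# superseded by `optimum`): the OnnxConfig above is picked up when exporting with
#   python -m transformers.onnx --model=allenai/longformer-base-4096 --feature=default onnx_out/
# and the dummy global_attention_mask (every second token global) is used for tracing.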
| 46
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCamelCase__:
def __init__( self : Any , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : float = 0 )-> None:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = row, column
UpperCAmelCase = [[default_value for c in range(lowerCAmelCase )] for r in range(lowerCAmelCase )]
def __str__( self : int )-> str:
"""simple docstring"""
UpperCAmelCase = F"""Matrix consist of {self.row} rows and {self.column} columns\n"""
# Make string identifier
UpperCAmelCase = 0
for row_vector in self.array:
for obj in row_vector:
UpperCAmelCase = max(lowerCAmelCase , len(str(lowerCAmelCase ) ) )
UpperCAmelCase = F"""%{max_element_length}s"""
# Make string and return
def single_line(lowerCAmelCase : list[float] ) -> str:
nonlocal string_format_identifier
UpperCAmelCase = '''['''
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(lowerCAmelCase ) for row_vector in self.array )
return s
def __repr__( self : Tuple )-> str:
"""simple docstring"""
return str(self )
def a__( self : str , lowerCAmelCase : tuple[int, int] )-> bool:
"""simple docstring"""
if not (isinstance(lowerCAmelCase , (list, tuple) ) and len(lowerCAmelCase ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : int , lowerCAmelCase : tuple[int, int] )-> Any:
"""simple docstring"""
assert self.validate_indicies(lowerCAmelCase )
return self.array[loc[0]][loc[1]]
def __setitem__( self : List[str] , lowerCAmelCase : tuple[int, int] , lowerCAmelCase : float )-> None:
"""simple docstring"""
assert self.validate_indicies(lowerCAmelCase )
UpperCAmelCase = value
def __add__( self : int , lowerCAmelCase : Matrix )-> Matrix:
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase )
assert self.row == another.row and self.column == another.column
# Add
UpperCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
UpperCAmelCase = self[r, c] + another[r, c]
return result
def __neg__( self : Dict )-> Matrix:
"""simple docstring"""
UpperCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
UpperCAmelCase = -self[r, c]
return result
def __sub__( self : Union[str, Any] , lowerCAmelCase : Matrix )-> Matrix:
"""simple docstring"""
return self + (-another)
def __mul__( self : Union[str, Any] , lowerCAmelCase : int | float | Matrix )-> Matrix:
"""simple docstring"""
if isinstance(lowerCAmelCase , (int, float) ): # Scalar multiplication
UpperCAmelCase = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
UpperCAmelCase = self[r, c] * another
return result
elif isinstance(lowerCAmelCase , lowerCAmelCase ): # Matrix multiplication
assert self.column == another.row
UpperCAmelCase = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
UpperCAmelCase = F"""Unsupported type given for another ({type(lowerCAmelCase )})"""
raise TypeError(lowerCAmelCase )
def a__( self : Optional[Any] )-> Matrix:
"""simple docstring"""
UpperCAmelCase = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
UpperCAmelCase = self[r, c]
return result
def a__( self : Tuple , lowerCAmelCase : Matrix , lowerCAmelCase : Matrix )-> Any:
"""simple docstring"""
assert isinstance(lowerCAmelCase , lowerCAmelCase ) and isinstance(lowerCAmelCase , lowerCAmelCase )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
UpperCAmelCase = v.transpose()
UpperCAmelCase = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
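# --- note (sketch): with `self` holding A^(-1), this is the Sherman-Morrison identity:
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
# `numerator_factor` above is that denominator; zero means A + u v^T is singular.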
# Testing
if __name__ == "__main__":
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = Matrix(3 , 3 , 0 )
for i in range(3 ):
ainv[i, i] = 1
print(f"""a^(-1) is {ainv}""" )
# u, v
UpperCAmelCase = Matrix(3 , 1 , 0 )
u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
UpperCAmelCase = Matrix(3 , 1 , 0 )
v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
print(f"""u is {u}""" )
print(f"""v is {v}""" )
print(f"""uv^T is {u * v.transpose()}""" )
# Sherman Morrison
print(f"""(a + uv^T)^(-1) is {ainv.sherman_morrison(A , A )}""" )
def lowerCamelCase__ ( ):
'''simple docstring'''
import doctest
doctest.testmod()
testa()
| 91
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase__( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
__magic_name__ : List[str] = StableDiffusionSAGPipeline
__magic_name__ : str = TEXT_TO_IMAGE_PARAMS
__magic_name__ : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__magic_name__ : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
__magic_name__ : List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
__magic_name__ : str = False
def a__( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
UpperCAmelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCAmelCase = CLIPTextModel(lowerCAmelCase )
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def a__( self : Optional[Any] , lowerCAmelCase : str , lowerCAmelCase : Tuple=0 )-> str:
"""simple docstring"""
if str(lowerCAmelCase ).startswith('''mps''' ):
UpperCAmelCase = torch.manual_seed(lowerCAmelCase )
else:
UpperCAmelCase = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
UpperCAmelCase = {
'''prompt''': '''.''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 1.0,
'''sag_scale''': 1.0,
'''output_type''': '''numpy''',
}
return inputs
def a__( self : Any )-> List[str]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Union[str, Any] )-> Any:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
UpperCAmelCase = StableDiffusionSAGPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
UpperCAmelCase = sag_pipe.to(lowerCAmelCase )
sag_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = '''.'''
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = sag_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
UpperCAmelCase = output.images
UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def a__( self : int )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
UpperCAmelCase = sag_pipe.to(lowerCAmelCase )
sag_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = '''.'''
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = sag_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' )
UpperCAmelCase = output.images
UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def a__( self : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = StableDiffusionSAGPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
UpperCAmelCase = sag_pipe.to(lowerCAmelCase )
sag_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = '''.'''
UpperCAmelCase = torch.manual_seed(0 )
UpperCAmelCase = sag_pipe(
[prompt] , width=768 , height=512 , generator=lowerCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='''np''' , )
UpperCAmelCase = output.images
assert image.shape == (1, 512, 768, 3)
| 91
| 1
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase__ )
class _a ( UpperCamelCase__ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
_lowercase : str = field(default='''question-answering-extractive''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_lowercase : ClassVar[Features] = Features({'''question''': Value('''string''' ), '''context''': Value('''string''' )} )
_lowercase : ClassVar[Features] = Features(
{
'''answers''': Sequence(
{
'''text''': Value('''string''' ),
'''answer_start''': Value('''int32''' ),
} )
} )
_lowercase : str = "question"
_lowercase : str = "context"
_lowercase : str = "answers"
@property
def lowerCamelCase_ ( self: List[Any] ) -> Dict[str, str]:
"""simple docstring"""
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
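# --- usage sketch (hedged; `prepare_for_task` existed in older `datasets` releases and
# has since been removed): a dataset with matching columns could be cast to this schema via
#   ds.prepare_for_task("question-answering-extractive")
# which renames the question/context/answers columns using the mapping returned above.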
| 110
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
MODEL_CONFIG_CLASSES = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def pil_loader(path: str):
    with open(path, "rb") as f:
        im = Image.open(f)
        return im.convert("RGB")
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None,
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self) -> None:
        if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
            raise ValueError(
                "You must specify either a dataset name from the hub or a train and/or validation directory."
            )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="google/vit-base-patch16-224-in21k",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def collate_fn(examples):
    # Stack the already-transformed image tensors and labels into a batch.
    pixel_values = torch.stack([example["pixel_values"] for example in examples])
    labels = torch.tensor([example["labels"] for example in examples])
    return {"pixel_values": pixel_values, "labels": labels}
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_image_classification", model_args, data_args)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
    if data_args.dataset_name is not None:
        dataset = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            task="image-classification",
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_dir is not None:
            data_files["train"] = os.path.join(data_args.train_dir, "**")
        if data_args.validation_dir is not None:
            data_files["validation"] = os.path.join(data_args.validation_dir, "**")
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            task="image-classification",
        )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
    labels = dataset["train"].features["labels"].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p):
        return metric.compute(predictions=np.argmax(p.predictions, axis=1), references=p.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        num_labels=len(labels),
        label2id=label2id,
        id2label=id2label,
        finetuning_task="image-classification",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForImageClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Define torchvision transforms to be applied to each image.
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
    _train_transforms = Compose(
        [
            RandomResizedCrop(size),
            RandomHorizontalFlip(),
            ToTensor(),
            normalize,
        ]
    )
    _val_transforms = Compose(
        [
            Resize(size),
            CenterCrop(size),
            ToTensor(),
            normalize,
        ]
    )

    def train_transforms(example_batch):
        """Apply _train_transforms across a batch."""
        example_batch["pixel_values"] = [
            _train_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]
        ]
        return example_batch

    def val_transforms(example_batch):
        """Apply _val_transforms across a batch."""
        example_batch["pixel_values"] = [_val_transforms(pil_img.convert("RGB")) for pil_img in example_batch["image"]]
        return example_batch
    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(train_transforms)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(val_transforms)
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
# Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
# Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "image-classification",
        "dataset": data_args.dataset_name,
        "tags": ["image-classification", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
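# Quick illustrative check of `collate_fn` above with dummy data (the shapes
# are arbitrary; real batches come from the torchvision transforms):
#
#     import torch
#
#     examples = [
#         {"pixel_values": torch.rand(3, 224, 224), "labels": 0},
#         {"pixel_values": torch.rand(3, 224, 224), "labels": 1},
#     ]
#     batch = collate_fn(examples)
#     print(batch["pixel_values"].shape)  # torch.Size([2, 3, 224, 224])
#     print(batch["labels"])              # tensor([0, 1])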
| 110
| 1
|
def solution(length: int = 50) -> int:
    # ways_number[n] counts tilings of a row of length n with 1-unit filler
    # plus tiles of length 2, 3 and 4.
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
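# Hand-checkable small cases for the recurrence above (verified by direct
# enumeration of the tilings):
#
#     assert solution(2) == 2
#     assert solution(3) == 4
#     assert solution(4) == 8
#     assert solution(5) == 15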
| 353
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
"facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
"facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
"facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
"facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
"facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
"facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
"facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
"facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
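# Minimal instantiation sketch: the adapter- and language-related arguments
# are what distinguish X-MOD from a plain XLM-R-style config (values here
# are illustrative).
#
#     config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
#     print(config.model_type)  # "xmod"
#     print(config.languages)   # ['en_XX', 'de_DE']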
| 339
| 0
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}
    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)
    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
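# Sketch of how this formatter is reached in practice (assumes `datasets`
# and `jax` are installed): requesting the "jax" format makes rows, columns
# and batches come back as jax arrays.
#
#     from datasets import Dataset
#
#     ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
#     print(type(ds[0]["x"]))  # a jax.Array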
| 337
|
from __future__ import annotations
def mean(nums: list[float]) -> float:
    """
    Return the arithmetic mean of a non-empty list of numbers.

    >>> mean([3, 6, 9, 12, 15, 18, 21])
    12.0
    >>> mean([5])
    5.0
    """
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 337
| 1
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=False,
        vocab_size=19,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        config = EsmConfig(
            vocab_size=33,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            is_folding_model=True,
            esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False},
        )
        return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @unittest.skip("Does not support attention outputs")
    def test_attention_outputs(self):
        pass

    @unittest.skip
    def test_correct_missing_keys(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("ESMFold does not support passing input embeds!")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_integration(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_config_init(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_head_pruning_save_load_from_pretrained(self):
        pass

    @unittest.skip("ESMFold does not support head pruning.")
    def test_headmasking(self):
        pass

    @unittest.skip("ESMFold does not output hidden states in the normal way.")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("ESMfold does not output hidden states in the normal way.")
    def test_retain_grad_hidden_states_attentions(self):
        pass

    @unittest.skip("ESMFold only has one output format.")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip("ESMFold does not support input chunking.")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
    def test_initialization(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_attentions(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_output_hidden_state(self):
        pass

    @unittest.skip("ESMFold doesn't support torchscript compilation.")
    def test_torchscript_simple(self):
        pass

    @unittest.skip("ESMFold doesn't support data parallel.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
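# Hedged usage sketch for the model exercised above (checkpoint as in the
# integration test; ESMFold inputs are protein sequences tokenized without
# special tokens, per the model card; the sequence is illustrative):
#
#     from transformers import AutoTokenizer, EsmForProteinFolding
#
#     tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
#     model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1")
#     inputs = tokenizer(["MLKNVQVQLV"], return_tensors="pt", add_special_tokens=False)
#     outputs = model(**inputs)
#     print(outputs.positions.shape)  # (num_recycles, batch, seq_len, 14, 3) atom coordinates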
| 315
|
import collections
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r"\s+\"\S*\":\s+\[([^\]]*)\]")
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r"^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r"^\s+\"([^\"]+)\",")
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """
    Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init.
    """

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """
    Check all inits in the repo and raise an error if at least one does not define the same objects in both halves.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """
    Returns the list of Transformers submodules.
    """
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
"convert_pytorch_checkpoint_to_tf2",
"modeling_flax_pytorch_utils",
"models.esm.openfold_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    from transformers.utils import direct_transformers_import

    transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)
    import_structure_keys = set(transformers._import_structure.keys())
    # This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
    # some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-) add them.
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r") as f:
        init_content = f.read()
    import_structure_keys.update(set(re.findall(r"import_structure\[\"([^\"]*)\"\]", init_content)))
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in import_structure_keys
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
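# Quick illustration of `find_backend` on representative init lines
# (illustrative, not part of the original script):
#
#     print(find_backend("    if not is_torch_available():"))  # -> "torch"
#     print(find_backend("import os"))                          # -> None (not a backend guard)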
| 315
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
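# Illustrative only (downloads the pretrained tokenizer): XLNet places its
# special tokens at the END of the sequence, unlike BERT-style models.
#
#     from transformers import XLNetTokenizerFast
#
#     tok = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#     ids = tok.build_inputs_with_special_tokens(tok.encode("Hello", add_special_tokens=False))
#     print(tok.convert_ids_to_tokens(ids)[-2:])  # ['<sep>', '<cls>']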
| 331
|
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)
        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTest(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
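# A minimal end-to-end sketch of the FaissIndex API exercised above (an
# illustrative helper, not part of the test suite; it assumes faiss and numpy
# are installed and that `path` points to a writable location):
def _faiss_roundtrip_demo(path: str) -> None:
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))  # five orthonormal vectors
    index.save(path)

    restored = FaissIndex.load(path)
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = restored.search(query)  # best match is vector #1
    assert indices[0] == 1 and scores[0] > 0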
"""simple docstring"""
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the endpoints is already a root
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if neither endpoint is a root and both values share a sign,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the bracketing interval is narrower than 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1_000))

    import doctest

    doctest.testmod()
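    # Illustrative check (assumption: the positive root of x**3 - 2*x - 5 is
    # approximately 2.0945514815). Each iteration halves |start - mid|, so the
    # loop converges in about log2((b - a) / 1e-7) ~= 34 iterations here.
    assert abs(bisection(f, 1, 1_000) - 2.0945514815) < 1e-4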
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)

    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()
if __name__ == "__main__":
main()
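    # Example invocation (a sketch; these flags come from
    # TensorFlowBenchmarkArguments and may vary across transformers versions):
    #   python run_benchmark_tf.py --models bert-base-uncased \
    #       --batch_sizes 8 --sequence_lengths 128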
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        renderer = ShapERenderer(**model_kwargs)
        return renderer
    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
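# A compact inference sketch distilled from the integration test above
# (illustrative only; it assumes a CUDA device and the public "openai/shap-e"
# checkpoint):
def _shap_e_inference_sketch() -> None:
    pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
    generator = torch.Generator(device="cuda").manual_seed(0)
    frames = pipe(
        "a shark",
        generator=generator,
        guidance_scale=15.0,
        num_inference_steps=64,
        frame_size=64,
        output_type="np",
    ).images[0]
    # frames is a (num_frames, frame_size, frame_size, 3) array of renders
    print(frames.shape)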
"""simple docstring"""
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
    "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
    "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
    "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
    "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
    "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
    "xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
    "xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
    "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
    "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}


class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }

    def __init__(
        self,
        vocab_size=30_145,
        emb_dim=2_048,
        n_layers=12,
        n_heads=16,
        dropout=0.1,
        attention_dropout=0.1,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=1,
        use_lang_emb=True,
        max_position_embeddings=512,
        embed_init_std=2_048**-0.5,
        layer_norm_eps=1e-12,
        init_std=0.02,
        bos_index=0,
        eos_index=1,
        pad_index=2,
        unk_index=3,
        mask_index=5,
        is_encoder=True,
        summary_type="first",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        mask_token_id=0,
        lang_id=0,
        pad_token_id=2,
        bos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)


class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
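# A short usage sketch (illustrative, not part of the original module): the
# attribute_map above lets generic code read `hidden_size` even though XLM
# stores the value under `emb_dim`.
if __name__ == "__main__":
    config = XLMConfig(vocab_size=30_145, emb_dim=1_024, n_layers=6, n_heads=8)
    print(config.hidden_size)  # -> 1024, resolved through attribute_map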
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Checks whether digit n can be placed at (row, column) without
    repeating in the row, the column, or the 3x3 sub-grid."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Returns the coordinates of the first empty (zero) cell, or None if
    the grid is already full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Fills the grid in place by backtracking: place a safe digit, recurse,
    and undo the placement if the recursion fails."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("""\nExample grid:\n""" + """=""" * 2_0)
print_solution(example_grid)
print("""\nExample grid solution:""")
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("""Cannot find a solution.""")
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        hidden_sizes=[32, 64, 128],
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2"],
        out_indices=[1, 2],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return FocalNetConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # TODO update organization
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
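# A compact inference sketch mirroring the integration test above
# (illustrative only; it assumes the public "microsoft/focalnet-tiny"
# checkpoint and the bundled COCO sample image):
def _focalnet_classification_sketch() -> int:
    processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
    model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # index 281 corresponds to "tabby cat" in the ImageNet-1k label set
    return logits.argmax(dim=-1).item()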
def validate_initial_digits(credit_card_number: str) -> bool:
    """Validates the first digits against known issuer prefixes."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Runs the Luhn checksum over the card number."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two-digit number,
        # i.e. greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single-digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit

    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])

    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Runs all validation checks and prints a diagnostic message."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False

    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False

    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False

    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
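    # Worked example: for 4111111111111111, doubling every second digit from
    # the right turns the leading 4 into 8 and seven of the 1s into 2s, so the
    # checksum is 8 + 7 * 2 + 8 * 1 = 30; 30 % 10 == 0, hence it passes Luhn.
    assert luhn_validation("4111111111111111")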
'''simple docstring'''
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """Reads the given file as bytes and returns it as one long bit string."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(
    lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str
) -> None:
    """Replaces curr_string with its two one-bit extensions in the lexicon."""
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        # the code length grew by one bit, so pad every existing code
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    """Compresses a bit string with LZW and returns the compressed bits."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    """Prepends the original file length (self-delimited) to the stream."""
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    """Packs the bit string into bytes (with end padding) and writes it."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
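    # Typical invocation: python compress.py <source_file> <destination_file>
    # Illustrative trace of the core routine (under the reconstruction above):
    # compress_data("01") emits "0" for the first bit, grows the lexicon, then
    # emits "01" for the second bit, returning "001". A matching decompressor
    # must strip the "1000..." byte padding added by write_file_binary.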
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowerCAmelCase : int =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
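# A short zero-shot classification sketch using the classes exported above
# (illustrative; it assumes the public "openai/clip-vit-base-patch32"
# checkpoint and a placeholder image path):
def _clip_zero_shot_sketch() -> None:
    from PIL import Image

    from transformers import CLIPModel, CLIPProcessor

    model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

    image = Image.open("cats.png")  # placeholder path
    inputs = processor(
        text=["a photo of a cat", "a photo of a dog"],
        images=image,
        return_tensors="pt",
        padding=True,
    )
    outputs = model(**inputs)
    probs = outputs.logits_per_image.softmax(dim=1)  # image-text similarity
    print(probs)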
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def _snake_case ( _snake_case : Tuple ) -> Dict:
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(F'''Param: {args}''' )
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''' )
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
logger.info('Data loader created.' )
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''' )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''' )
logger.info('Student loaded.' )
# TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''' )
logger.info(F'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
distiller.train()
logger.info('Let\'s go get some drinks.' )
if __name__ == "__main__":
main()
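    # Example invocation (a sketch; every flag below is defined by the parser
    # in main(), but the file paths are placeholders):
    #   python train.py --student_type distilbert --student_config student.json \
    #       --teacher_type bert --teacher_name bert-base-uncased \
    #       --mlm --token_counts counts.pkl --data_file data.pkl --dump_path out/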
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class Test(unittest.TestCase):
    def test_component(self):
        x = Vector([1, 2, 3])
        self.assertEqual(x.component(0), 1)
        self.assertEqual(x.component(2), 3)
        _ = Vector()

    def test_str(self):
        x = Vector([0, 0, 0, 0, 0, 1])
        self.assertEqual(str(x), "(0,0,0,0,0,1)")

    def test_size(self):
        x = Vector([1, 2, 3, 4])
        self.assertEqual(len(x), 4)

    def test_euclidean_length(self):
        x = Vector([1, 2])
        y = Vector([1, 2, 3, 4, 5])
        z = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        w = Vector([1, -1, 1, -1, 2, -3, 4, -5])
        self.assertAlmostEqual(x.euclidean_length(), 2.236, 3)
        self.assertAlmostEqual(y.euclidean_length(), 7.416, 3)
        self.assertEqual(z.euclidean_length(), 0)
        self.assertAlmostEqual(w.euclidean_length(), 7.616, 3)

    def test_add(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x + y).component(0), 2)
        self.assertEqual((x + y).component(1), 3)
        self.assertEqual((x + y).component(2), 4)

    def test_sub(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 1, 1])
        self.assertEqual((x - y).component(0), 0)
        self.assertEqual((x - y).component(1), 1)
        self.assertEqual((x - y).component(2), 2)

    def test_mul(self):
        x = Vector([1, 2, 3])
        a = Vector([2, -1, 4])  # for test of dot product
        b = Vector([1, -2, -1])
        self.assertEqual(str(x * 3.0), "(3.0,6.0,9.0)")
        self.assertEqual((a * b), 0)

    def test_zero_vector(self):
        self.assertEqual(str(zero_vector(10)).count("0"), 10)

    def test_unit_basis_vector(self):
        self.assertEqual(str(unit_basis_vector(3, 1)), "(0,1,0)")

    def test_axpy(self):
        x = Vector([1, 2, 3])
        y = Vector([1, 0, 1])
        self.assertEqual(str(axpy(2, x, y)), "(3,4,7)")

    def test_copy(self):
        x = Vector([1, 0, 0, 0, 0, 0])
        y = x.copy()
        self.assertEqual(str(x), str(y))

    def test_change_component(self):
        x = Vector([1, 0, 0])
        x.change_component(0, 0)
        x.change_component(1, 1)
        self.assertEqual(str(x), "(0,1,0)")

    def test_str_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_minor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        minors = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(minors[x][y], a.minor(x, y))

    def test_cofactor(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        cofactors = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
        for x in range(a.height()):
            for y in range(a.width()):
                self.assertEqual(cofactors[x][y], a.cofactor(x, y))

    def test_determinant(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(-5, a.determinant())

    def test_mul_matrix(self):
        a = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
        x = Vector([1, 2, 3])
        self.assertEqual("(14,32,50)", str(a * x))
        self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n", str(a * 2))

    def test_change_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        a.change_component(0, 2, 5)
        self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n", str(a))

    def test_component_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        self.assertEqual(7, a.component(2, 1), 0.01)

    def test_add_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n", str(a + b))

    def test_sub_matrix(self):
        a = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]], 3, 3)
        b = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]], 3, 3)
        self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n", str(a - b))

    def test_square_zero_matrix(self):
        self.assertEqual(
            "|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n",
            str(square_zero_matrix(5)),
        )
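# A tiny usage sketch of the same ``lib`` API the suite above exercises
# (illustrative; it assumes the sibling ``lib`` module is importable):
def _lib_demo() -> None:
    v = Vector([1, 2, 3])
    m = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]], 3, 3)
    print(v.euclidean_length())  # sqrt(14) ~= 3.742
    print(m * v)  # matrix-vector product -> (14,32,50)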
if __name__ == "__main__":
unittest.main()
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
lowerCAmelCase__ : Tuple = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):  # 10 matches the length of the demo list built in main()
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_ls = temp_rs
    temp_lr = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_ls = temp_rs
        temp_lr = temp_rr
    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
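# For comparison, a minimal single-process odd-even transposition sort (a
# sketch; `odd_even_transposition_sequential` is a hypothetical helper, not
# part of the original module). It runs the same n alternating odd/even
# compare-and-swap phases that the processes above perform over pipes.
def odd_even_transposition_sequential(arr):
    arr = list(arr)
    for phase in range(len(arr)):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        start = 0 if phase % 2 == 0 else 1
        for i in range(start, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr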
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase__ : Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """simple docstring"""
    def __init__(self, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        """simple docstring"""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        """simple docstring"""
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        """simple docstring"""
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        """simple docstring"""
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        """simple docstring"""
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
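# A minimal usage sketch for the pipeline above (the `pipeline` factory import
# and the CLIP checkpoint name are assumptions, not defined in this file):
#
#     from transformers import pipeline
#     classifier = pipeline(
#         "zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     outputs = classifier(
#         "path/to/image.png", candidate_labels=["cat", "dog", "bird"])
#     # -> a list of {"score": ..., "label": ...} dicts sorted by descending
#     #    score, exactly the structure built in postprocess() above.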
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    '''simple docstring'''
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    '''simple docstring'''
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data


@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    '''simple docstring'''
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
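# A small worked example of the flat-index trick above (a sketch): with
# labels_dense = numpy.array([1, 0, 2]) and num_classes = 3, index_offset is
# [0, 3, 6], so index_offset + labels_dense.ravel() = [1, 3, 8]; setting those
# flat positions of a 3x3 zero matrix to 1 yields
#   [[0, 1, 0],
#    [1, 0, 0],
#    [0, 0, 1]]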
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    '''simple docstring'''
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    """simple docstring"""
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        '''simple docstring'''
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        '''simple docstring'''
        return self._images

    @property
    def labels(self):
        '''simple docstring'''
        return self._labels

    @property
    def num_examples(self):
        '''simple docstring'''
        return self._num_examples

    @property
    def epochs_completed(self):
        '''simple docstring'''
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        '''simple docstring'''
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    '''simple docstring'''
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath


@deprecated(
    None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    '''simple docstring'''
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"
    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)
    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)
    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)
    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)
    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
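# A minimal usage sketch (assumes network access to the CVDF mirror above;
# "/tmp/mnist" is an arbitrary scratch directory, not part of this module):
#
#     data = read_data_sets("/tmp/mnist", one_hot=True)
#     images, labels = data.train.next_batch(32)
#     # images: (32, 784) float32 scaled to [0, 1] (reshape=True and
#     # dtype=float32 defaults); labels: (32, 10) one-hot rows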
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    """simple docstring"""
    model_class = AutoencoderKL
    main_input_name = 'sample'
    base_precision = 1e-2

    @property
    def dummy_input(self):
        '''simple docstring'''
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {"sample": image}

    @property
    def input_shape(self):
        '''simple docstring'''
        return (3, 32, 32)

    @property
    def output_shape(self):
        '''simple docstring'''
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        '''simple docstring'''
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        '''simple docstring'''
        pass

    def test_training(self):
        '''simple docstring'''
        pass
    @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS")
    def test_gradient_checkpointing(self):
        '''simple docstring'''
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()
        assert model_2.is_gradient_checkpointing and model_2.training
        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
    def test_from_pretrained_hub(self):
        '''simple docstring'''
        model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(torch_device)
        image = model(**self.dummy_input)
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained(self):
        '''simple docstring'''
        model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy")
        model = model.to(torch_device)
        model.eval()
        if torch_device == "mps":
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)
        image = torch.randn(
            1, model.config.in_channels, model.config.sample_size, model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ])
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026])
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    """simple docstring"""
    def get_file_format(self, seed, shape):
        '''simple docstring'''
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        '''simple docstring'''
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False):
        '''simple docstring'''
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision)
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        '''simple docstring'''
        if torch_device == "mps":
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        '''simple docstring'''
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        '''simple docstring'''
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)
        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        '''simple docstring'''
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        with torch.no_grad():
            sample = model(image).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        '''simple docstring'''
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        '''simple docstring'''
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        '''simple docstring'''
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        '''simple docstring'''
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 512, 512]
        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        '''simple docstring'''
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)
        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        tolerance = 3e-3 if torch_device != "mps" else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
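# A minimal encode/decode round trip with AutoencoderKL, mirroring what the
# slow tests above exercise (a sketch; the 512x512 input size and the SD v1-4
# checkpoint are assumptions taken from the tests, not a prescribed workflow):
#
#     vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae")
#     x = torch.randn(1, 3, 512, 512)
#     with torch.no_grad():
#         latents = vae.encode(x).latent_dist.sample()   # (1, 4, 64, 64), 8x downsampled
#         recon = vae.decode(latents).sample             # (1, 3, 512, 512)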
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'''configuration_van''': ['''VAN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VanConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_van'''] = [
        '''VAN_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''VanForImageClassification''',
        '''VanModel''',
        '''VanPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
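# What the lazy module above buys (a sketch of the observable behaviour; the
# exact import path depends on where this package sits in the tree): once
# sys.modules[__name__] is swapped for the _LazyModule, an import such as
# `from ...van import VanConfig` resolves the attribute on first access, so
# merely importing the package does not pull in torch unless a torch-backed
# symbol like VanModel is actually touched.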
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
snake_case : Tuple = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = '''sample'''

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'block_out_channels': (32, 64),
            'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
            'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
            'attention_head_dim': 3,
            'out_channels': 3,
            'in_channels': 3,
            'layers_per_block': 2,
            'sample_size': 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict


class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = '''sample'''

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'sample_size': 32,
            'in_channels': 4,
            'out_channels': 4,
            'layers_per_block': 2,
            'block_out_channels': (32, 64),
            'attention_head_dim': 32,
            'down_block_types': ('DownBlock2D', 'DownBlock2D'),
            'up_block_types': ('UpBlock2D', 'UpBlock2D'),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained('fusing/unet-ldm-dummy-update', output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['missing_keys']), 0)
        model.to(torch_device)
        image = model(**self.dummy_input).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != 'cuda', 'This test is supposed to run on GPU')
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained('fusing/unet-ldm-dummy-update', output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample
        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != 'cuda', 'This test is supposed to run on GPU')
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNet2DModel.from_pretrained('fusing/unet-ldm-dummy-update', output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()
        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)
        arr_accelerate = model_accelerate(noise, time_step)['sample']
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        model_normal_load, _ = UNet2DModel.from_pretrained(
            'fusing/unet-ldm-dummy-update', output_loading_info=True, low_cpu_mem_usage=False)
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)['sample']
        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1E-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained('fusing/unet-ldm-dummy-update')
        model.eval()
        model.to(torch_device)
        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1E-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = '''sample'''

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3
        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        # int32 is an assumption: the garbled `torch.intaa` could also have been int64
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)
        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'block_out_channels': [32, 64, 64, 64],
            'in_channels': 3,
            'layers_per_block': 1,
            'out_channels': 3,
            'time_embedding_type': 'fourier',
            'norm_eps': 1E-6,
            'mid_block_scale_factor': math.sqrt(2.0),
            'norm_num_groups': None,
            'down_block_types': [
                'SkipDownBlock2D',
                'AttnSkipDownBlock2D',
                'SkipDownBlock2D',
                'SkipDownBlock2D',
            ],
            'up_block_types': [
                'SkipUpBlock2D',
                'SkipUpBlock2D',
                'AttnSkipUpBlock2D',
                'SkipUpBlock2D',
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained('google/ncsnpp-celebahq-256', output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['missing_keys']), 0)
        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs['sample'] = noise
        image = model(**inputs)
        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained('google/ncsnpp-celebahq-256')
        model.to(torch_device)
        batch_size = 4
        num_channels = 3
        sizes = (256, 256)
        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1E-4]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1E-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update')
        model.to(torch_device)
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1E-4]).to(torch_device)
        with torch.no_grad():
            output = model(noise, time_step).sample
        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1E-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    """simple docstring"""
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1E-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    """simple docstring"""
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])

    @property
    def atol_for_validation(self):
        return 1E-4

    @property
    def default_onnx_opset(self):
        return 12
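# A minimal usage sketch for the two classes above (the printed values follow
# from the defaults in __init__; a sketch, not a prescribed workflow):
#
#     config = SegformerConfig()              # SegFormer-b0 style defaults
#     print(config.hidden_sizes)              # [32, 64, 160, 256]
#     onnx_config = SegformerOnnxConfig(config)
#     print(dict(onnx_config.inputs))         # {"pixel_values": {0: "batch", ...}}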
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"junnyu/roformer_chinese_small": 1536,
"junnyu/roformer_chinese_base": 1536,
"junnyu/roformer_chinese_char_small": 512,
"junnyu/roformer_chinese_char_base": 512,
"junnyu/roformer_small_discriminator": 128,
"junnyu/roformer_small_generator": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"junnyu/roformer_chinese_small": {"do_lower_case": True},
"junnyu/roformer_chinese_base": {"do_lower_case": True},
"junnyu/roformer_chinese_char_small": {"do_lower_case": True},
"junnyu/roformer_chinese_char_base": {"do_lower_case": True},
"junnyu/roformer_small_discriminator": {"do_lower_case": True},
"junnyu/roformer_small_generator": {"do_lower_case": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
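# A minimal usage sketch (assumes the junnyu/roformer_chinese_base checkpoint
# listed above is reachable; a sketch, not a prescribed workflow):
#
#     tok = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#     enc = tok("今天天气非常好。")
#     # enc.input_ids starts with [CLS] and ends with [SEP]; words are split by
#     # the custom Jieba pre-tokenizer this class wires in.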
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list
if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC
    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput
if is_torch_available():
    from transformers import Wav2Vec2ForCTC
@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        '''simple docstring'''
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }
        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")
        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        '''simple docstring'''
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        '''simple docstring'''
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        '''simple docstring'''
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)
        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)
        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)
        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        '''simple docstring'''
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder())
        processor.save_pretrained(self.tmpdirname)
        # make sure that error is thrown when decoder alphabet doesn't match
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3)
        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder())

    def test_feature_extractor(self):
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        '''simple docstring'''
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits(shape=(10, 16), seed=13)
        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]
        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        '''simple docstring'''
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()
        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        logits = self._get_dummy_logits()
        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)
        logits_list = list(logits)
        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)
        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])
        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
def _UpperCamelCase ( self : Dict ):
'''simple docstring'''
A__ : List[Any] = self.get_feature_extractor()
A__ : str = self.get_tokenizer()
A__ : Dict = self.get_decoder()
A__ : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case )
A__ : List[str] = self._get_dummy_logits()
A__ : List[Any] = 15
A__ : Any = -20.0
A__ : Dict = -4.0
A__ : Dict = processor.batch_decode(
snake_case , beam_width=snake_case , beam_prune_logp=snake_case , token_min_logp=snake_case , )
A__ : Optional[int] = decoded_processor_out.text
A__ : List[str] = list(snake_case )
with get_context("""fork""" ).Pool() as pool:
A__ : Any = decoder.decode_beams_batch(
snake_case , snake_case , beam_width=snake_case , beam_prune_logp=snake_case , token_min_logp=snake_case , )
A__ : Optional[int] = [d[0][0] for d in decoded_decoder_out]
A__ : int = [d[0][2] for d in decoded_decoder_out]
A__ : Tuple = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(snake_case , snake_case )
self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , snake_case )
self.assertTrue(np.array_equal(snake_case , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , snake_case , atol=1e-3 ) )
self.assertTrue(np.array_equal(snake_case , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9474] , snake_case , atol=1e-3 ) )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : Dict = self.get_feature_extractor()
A__ : List[str] = self.get_tokenizer()
A__ : List[Any] = self.get_decoder()
A__ : int = WavaVecaProcessorWithLM(tokenizer=snake_case , feature_extractor=snake_case , decoder=snake_case )
A__ : List[str] = self._get_dummy_logits()
A__ : Union[str, Any] = 2.0
A__ : Any = 5.0
A__ : int = -20.0
A__ : int = True
A__ : List[Any] = processor.batch_decode(
snake_case , alpha=snake_case , beta=snake_case , unk_score_offset=snake_case , lm_score_boundary=snake_case , )
A__ : Optional[Any] = decoded_processor_out.text
A__ : Union[str, Any] = list(snake_case )
decoder.reset_params(
alpha=snake_case , beta=snake_case , unk_score_offset=snake_case , lm_score_boundary=snake_case , )
with get_context("""fork""" ).Pool() as pool:
A__ : Optional[int] = decoder.decode_beams_batch(
snake_case , snake_case , )
A__ : Tuple = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(snake_case , snake_case )
self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , snake_case )
A__ : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , snake_case )
def _UpperCamelCase ( self : Tuple ):
'''simple docstring'''
A__ : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" )
A__ : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key]
A__ : Tuple = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
A__ : Tuple = os.listdir(snake_case )
A__ : int = ["""alphabet.json""", """language_model"""]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(snake_case , snake_case )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : str = snapshot_download("""hf-internal-testing/processor_with_lm""" )
A__ : Tuple = WavaVecaProcessorWithLM.from_pretrained(snake_case )
A__ : List[Any] = processor.decoder.model_container[processor.decoder._model_key]
A__ : List[str] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute()
A__ : Optional[int] = os.listdir(snake_case )
A__ : Any = os.listdir(snake_case )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(snake_case , snake_case )
    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)
    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])
    def test_offsets_integration_fast_batch(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])
    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
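

# Illustrative sketch (added, not one of the original tests): how the word-offset
# API exercised above is typically used to compute word-level time stamps. The
# checkpoint name mirrors the integration test; the one second of dummy silence
# is an assumption for illustration only.
def _example_word_time_stamps():
    import numpy as np
    import torch
    from transformers import AutoProcessor, Wav2Vec2ForCTC

    processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
    model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

    raw_audio = np.zeros(16_000, dtype=np.float32)  # one second of silence
    input_values = processor(raw_audio, sampling_rate=16_000, return_tensors="pt").input_values
    with torch.no_grad():
        logits = model(input_values).logits.cpu().numpy()

    output = processor.decode(logits[0], output_word_offsets=True)
    # seconds per logit frame = model stride / sampling rate
    time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
    return [
        {"word": d["word"], "start": d["start_offset"] * time_offset, "end": d["end_offset"] * time_offset}
        for d in output["word_offsets"]
    ]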
"""simple docstring"""
import os
from distutils.util import strtobool
def parse_int_from_env(env_keys, default):
    """Return the first non-negative integer found among ``env_keys``, else ``default``."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
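

# Usage sketch (added): the environment variable names below are made-up
# examples, not keys these helpers are required to read.
def _example_env_parsing():
    os.environ["RUN_SLOW"] = "yes"
    os.environ["NUM_PROCS"] = "4"
    assert parse_flag_from_env("RUN_SLOW", default=False) is True  # strtobool("yes") == 1
    assert parse_int_from_env(["MISSING_KEY", "NUM_PROCS"], default=1) == 4
    assert parse_choice_from_env("UNSET_KEY", default="no") == "no"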
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
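

# Illustrative sketch (added, not one of the original tests): the same masked-LM
# check expressed through the fill-mask pipeline. The expectation that "capital"
# ranks first mirrors the integration test above and is otherwise an assumption.
def _example_nystromformer_fill_mask():
    from transformers import pipeline

    fill_mask = pipeline("fill-mask", model="uw-madison/nystromformer-512")
    predictions = fill_mask("the [MASK] of Belgium is Brussels")
    assert predictions[0]["token_str"].strip() == "capital"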
from collections import namedtuple
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def convert_volume(value: float, from_type: str, to_type: str) -> float:
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n" + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n" + ", ".join(METRIC_CONVERSION)
        )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
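

# Usage sketch (added): conversions pivot through cubic metres, so litre -> gallon
# multiplies by 0.001 (litre -> m^3) and then by 264.172 (m^3 -> gallon).
def _example_volume_conversion():
    assert abs(convert_volume(5, "cubicmeter", "litre") - 5000) < 1e-9
    assert abs(convert_volume(1000, "litre", "gallon") - 264.172) < 1e-6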
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
"facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of "weight_norm", "time_group_norm", got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
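

# Behaviour sketch (added): with the default 24 kHz settings the hop length is
# prod([8, 5, 4, 2]) = 320, so frame_rate = ceil(24000 / 320) = 75 and the top
# 24.0 kbps bandwidth yields int(1000 * 24.0 // 750) = 32 quantizers.
def _example_encodec_config():
    config = EncodecConfig()
    assert config.frame_rate == 75
    assert config.num_quantizers == 32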
from manim import *
class CheckpointLoading(Scene):
    def construct(self):
__magic_name__ : int = Rectangle(height=0.5 , width=0.5 )
__magic_name__ : Optional[int] = Rectangle(height=0.2_5 , width=0.2_5 )
__magic_name__ : str = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
__magic_name__ : List[Any] = [mem.copy() for i in range(6 )]
__magic_name__ : int = [mem.copy() for i in range(6 )]
__magic_name__ : Tuple = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__magic_name__ : List[str] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__magic_name__ : str = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__magic_name__ : Union[str, Any] = Text("""CPU""" , font_size=24 )
__magic_name__ : Tuple = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(lowerCAmelCase__ )
__magic_name__ : Any = [mem.copy() for i in range(4 )]
__magic_name__ : List[Any] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__magic_name__ : Tuple = Text("""GPU""" , font_size=24 )
__magic_name__ : Tuple = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
gpu.move_to([-1, -1, 0] )
self.add(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = [mem.copy() for i in range(6 )]
__magic_name__ : Union[str, Any] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__magic_name__ : str = Text("""Model""" , font_size=24 )
__magic_name__ : Optional[int] = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
model.move_to([3, -1.0, 0] )
self.add(lowerCAmelCase__ )
__magic_name__ : str = []
__magic_name__ : Tuple = []
__magic_name__ : Union[str, Any] = []
for i, rect in enumerate(lowerCAmelCase__ ):
rect.set_stroke(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(lowerCAmelCase__ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=lowerCAmelCase__ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=lowerCAmelCase__ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=lowerCAmelCase__ , buff=0.0 )
self.add(lowerCAmelCase__ )
model_cpu_arr.append(lowerCAmelCase__ )
self.add(*lowerCAmelCase__ , *lowerCAmelCase__ , *lowerCAmelCase__ )
__magic_name__ : Optional[Any] = [mem.copy() for i in range(6 )]
__magic_name__ : Optional[Any] = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__magic_name__ : Any = Text("""Loaded Checkpoint""" , font_size=24 )
__magic_name__ : Optional[int] = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
checkpoint.move_to([3, 0.5, 0] )
self.add(lowerCAmelCase__ )
__magic_name__ : Optional[int] = []
__magic_name__ : Tuple = []
for i, rect in enumerate(lowerCAmelCase__ ):
__magic_name__ : Dict = fill.copy().set_fill(lowerCAmelCase__ , opacity=0.7 )
target.move_to(lowerCAmelCase__ )
ckpt_arr.append(lowerCAmelCase__ )
__magic_name__ : int = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(lowerCAmelCase__ )
self.add(*lowerCAmelCase__ , *lowerCAmelCase__ )
__magic_name__ : Tuple = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__magic_name__ : str = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Any = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(lowerCAmelCase__ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(lowerCAmelCase__ )
__magic_name__ : Optional[Any] = MarkupText(
F'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
__magic_name__ : int = [meta_mem.copy() for i in range(6 )]
__magic_name__ : Union[str, Any] = [meta_mem.copy() for i in range(6 )]
__magic_name__ : Any = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__magic_name__ : str = VGroup(*lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__magic_name__ : Tuple = VGroup(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0 )
__magic_name__ : int = Text("""Disk""" , font_size=24 )
__magic_name__ : Union[str, Any] = Group(lowerCAmelCase__ , lowerCAmelCase__ ).arrange(lowerCAmelCase__ , buff=0.5 , aligned_edge=lowerCAmelCase__ )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(lowerCAmelCase__ , run_time=3 ) , Write(lowerCAmelCase__ , run_time=1 ) , Create(lowerCAmelCase__ , run_time=1 ) )
__magic_name__ : List[Any] = []
for i, rect in enumerate(lowerCAmelCase__ ):
__magic_name__ : Dict = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(lowerCAmelCase__ , run_time=1.5 ) )
self.play(*lowerCAmelCase__ )
self.play(FadeOut(lowerCAmelCase__ ) )
__magic_name__ : str = MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(lowerCAmelCase__ , run_time=3 ) )
self.play(
FadeOut(lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ , *lowerCAmelCase__ ) , )
self.wait()
from __future__ import annotations
def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative in a semiconductor' )
elif hole_conc < 0:
raise ValueError('Hole concentration cannot be negative in a semiconductor' )
elif intrinsic_conc < 0:
raise ValueError(
'Intrinsic concentration cannot be negative in a semiconductor' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
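

# Usage sketch (added): carrier_concentration applies the mass-action law
# n * p = n_i^2, solving for whichever of the three concentrations is passed as 0.
def _example_carrier_concentration():
    label, value = carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0)
    assert label == "intrinsic_conc" and value == 50.0  # sqrt(25 * 100)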
'''simple docstring'''
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
_DESCRIPTION = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
_KWARGS_DESCRIPTION = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
CHECKPOINT_URLS = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,homepage="""https://github.com/google-research/bleurt""" ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" ,id="""sequence""" ),
"""references""": datasets.Value("""string""" ,id="""sequence""" ),
} ) ,codebase_urls=["""https://github.com/google-research/bleurt"""] ,reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] ,)
    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            self.config_name = "bleurt-base-128"

        if self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    # Use Aer's simulator backend
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(F"""Total count for various states are: {single_qubit_measure(1, 1)}""")
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
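

# Usage sketch (added; the model name and data directory are placeholders). The
# equivalent CLI call through python-fire would be:
#   python save_len_file.py --tokenizer_name t5-small --data_dir ./wmt_en_ro
def _example_save_len_file():
    save_len_file("t5-small", "./wmt_en_ro", max_source_length=128, max_target_length=128)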
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def euler_modified(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """Solve an ODE with Heun's (modified Euler) predictor-corrector method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
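

# Usage sketch (added): integrate dy/dx = y from x = 0 with y(0) = 1 up to x = 1;
# the last entry should approach e ~ 2.71828, since Heun's method is second order.
def _example_euler_modified():
    y = euler_modified(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
    assert abs(y[-1] - 2.71828) < 1e-3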
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union[PreTrainedTokenizerBase, FeatureExtractionMixin],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """Sort the array in place with pigeonhole sort and return it."""
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
print(pigeon_sort(unsorted))
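

# Behaviour sketch (added): pigeonhole sort runs in O(n + range) time, so it only
# pays off when max(array) - min(array) is comparable to len(array).
def _example_pigeon_sort():
    assert pigeon_sort([5, 3, 8, 1, 3]) == [1, 3, 3, 5, 8]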
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of every pixel by ``level``."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class KarrasVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class KarrasVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self):
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-tokenizer")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")

        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
_lowerCamelCase : str = Trie()
trie.add('Hello 友達' )
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}} )
trie.add('Hello' )
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}} )
def A_ ( self ):
_lowerCamelCase : Tuple = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS] This is a extra_id_100'] )
trie.add('[CLS]' )
trie.add('extra_id_1' )
trie.add('extra_id_100' )
self.assertEqual(trie.split('[CLS] This is a extra_id_100' ) , ['[CLS]', ' This is a ', 'extra_id_100'] )
def A_ ( self ):
_lowerCamelCase : int = Trie()
trie.add('A' )
self.assertEqual(trie.split('ABC' ) , ['A', 'BC'] )
self.assertEqual(trie.split('BCA' ) , ['BC', 'A'] )
def A_ ( self ):
        trie = Trie()
trie.add('TOKEN]' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def A_ ( self ):
        trie = Trie()
trie.add('A' )
trie.add('P' )
trie.add('[SPECIAL_TOKEN]' )
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]' ) , ['This is something ', '[SPECIAL_TOKEN]'] )
def A_ ( self ):
        trie = Trie()
trie.add('AB' )
trie.add('B' )
trie.add('C' )
self.assertEqual(trie.split('ABC' ) , ['AB', 'C'] )
def A_ ( self ):
        trie = Trie()
trie.add('ABC' )
trie.add('B' )
trie.add('CD' )
self.assertEqual(trie.split('ABCD' ) , ['ABC', 'D'] )
def A_ ( self ):
        trie = Trie()
        parts = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3] )
        self.assertEqual(parts , ['AB', 'C'] )
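# A quick standalone sketch (hypothetical strings) of the longest-match rule the tests
# above rely on: `split` keeps the longest added token intact, so 'extra_id_100' wins
# over its prefix 'extra_id_1' and the surrounding text is preserved verbatim.
_demo_trie = Trie()
_demo_trie.add('extra_id_1')
_demo_trie.add('extra_id_100')
assert _demo_trie.split('a extra_id_100 b') == ['a ', 'extra_id_100', ' b']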
| 356
|
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest( SchedulerCommonTest ):
    '''simple docstring'''

    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config( self , **kwargs ):
        config = {
            'num_train_timesteps': 1000,
            'variance_type': 'fixed_small_log',
            'clip_sample': True,
            'clip_sample_range': 1.0,
            'prediction_type': 'epsilon',
        }

        config.update(**kwargs )
        return config

    def test_timesteps( self ):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )

    def test_variance_type( self ):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance )

    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample )

    def test_clip_sample_range( self ):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range )

    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type )

    def test_time_indices( self ):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue

                self.check_over_forward(time_step=time_step , prev_timestep=prev_timestep )

    def test_variance_fixed_small_log( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='fixed_small_log' )
        scheduler = scheduler_class(**scheduler_config )

        assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5

    def test_variance_learned_range( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='learned_range' )
        scheduler = scheduler_class(**scheduler_config )

        predicted_variance = 0.5

        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.1_71_27_90 < 1E-5
        assert scheduler._get_variance(487 , predicted_variance=predicted_variance ) - -5.7_99_80_52 < 1E-5
        assert scheduler._get_variance(999 , predicted_variance=predicted_variance ) - -0.0_01_00_11 < 1E-5

    def test_full_loop( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )

        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
        assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3

    def test_full_loop_skip_timesteps( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )

        scheduler.set_timesteps(25 )

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )

        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )

        assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
        assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3

    def test_trained_betas( self ):
        pass

    def test_add_noise_device( self ):
        pass
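# A back-of-the-envelope sketch (assumption: a plain linear beta schedule, which is NOT
# necessarily what UnCLIPScheduler uses, so it will not reproduce the asserted values
# above) of the quantity behind 'fixed_small_log': the DDPM posterior variance
#   beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t),
# which the scheduler then clamps in log space. torch is already imported above.
def _ddpm_posterior_variance(t: int, num_train_timesteps: int = 1000) -> torch.Tensor:
    betas = torch.linspace(0.0001, 0.02, num_train_timesteps)  # assumed schedule
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    alpha_bar_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    return betas[t] * (1.0 - alpha_bar_prev) / (1.0 - alphas_cumprod[t])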
| 12
| 0
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback (TrainerCallback ):
    '''simple docstring'''

    def __init__( self ):
        self.events = []

    def on_init_end( self , args , state , control , **kwargs ):
        self.events.append("on_init_end" )

    def on_train_begin( self , args , state , control , **kwargs ):
        self.events.append("on_train_begin" )

    def on_train_end( self , args , state , control , **kwargs ):
        self.events.append("on_train_end" )

    def on_epoch_begin( self , args , state , control , **kwargs ):
        self.events.append("on_epoch_begin" )

    def on_epoch_end( self , args , state , control , **kwargs ):
        self.events.append("on_epoch_end" )

    def on_step_begin( self , args , state , control , **kwargs ):
        self.events.append("on_step_begin" )

    def on_step_end( self , args , state , control , **kwargs ):
        self.events.append("on_step_end" )

    def on_evaluate( self , args , state , control , **kwargs ):
        self.events.append("on_evaluate" )

    def on_predict( self , args , state , control , **kwargs ):
        self.events.append("on_predict" )

    def on_save( self , args , state , control , **kwargs ):
        self.events.append("on_save" )

    def on_log( self , args , state , control , **kwargs ):
        self.events.append("on_log" )

    def on_prediction_step( self , args , state , control , **kwargs ):
        self.events.append("on_prediction_step" )
@require_torch
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        self.output_dir = tempfile.mkdtemp()

    def tearDown( self ):
        shutil.rmtree(self.output_dir )
    def get_trainer( self , a=0 , b=0 , train_len=64 , eval_len=64 , callbacks=None , disable_tqdm=False , **kwargs ):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # its set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len )
        eval_dataset = RegressionDataset(length=eval_len )
        config = RegressionModelConfig(a=a , b=b )
        model = RegressionPreTrainedModel(config )

        args = TrainingArguments(self.output_dir , disable_tqdm=disable_tqdm , report_to=[] , **kwargs )
        return Trainer(
            model , args , train_dataset=train_dataset , eval_dataset=eval_dataset , callbacks=callbacks , )
    def check_callbacks_equality( self , cbs1 , cbs2 ):
        self.assertEqual(len(cbs1 ) , len(cbs2 ) )

        # Order doesn't matter
        cbs1 = sorted(cbs1 , key=lambda cb: cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        cbs2 = sorted(cbs2 , key=lambda cb: cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )

        for cb1, cb2 in zip(cbs1 , cbs2 ):
            if isinstance(cb1 , type ) and isinstance(cb2 , type ):
                self.assertEqual(cb1 , cb2 )
            elif isinstance(cb1 , type ) and not isinstance(cb2 , type ):
                self.assertEqual(cb1 , cb2.__class__ )
            elif not isinstance(cb1 , type ) and isinstance(cb2 , type ):
                self.assertEqual(cb1.__class__ , cb2 )
            else:
                self.assertEqual(cb1 , cb2 )
    def get_expected_events( self , trainer ):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader() )
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader() ) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs ):
            expected_events.append("on_epoch_begin" )
            for _ in range(train_dl_len ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("on_log" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("on_save" )
expected_events.append("on_epoch_end" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def _A ( self : str ):
_UpperCAmelCase : Any = self.get_trainer()
_UpperCAmelCase : int = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
# Callbacks passed at init are added to the default callbacks
_UpperCAmelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
_UpperCAmelCase : List[Any] = self.get_trainer(disable_tqdm=A )
_UpperCAmelCase : Tuple = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
def _A ( self : Optional[Any] ):
_UpperCAmelCase : Dict = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
_UpperCAmelCase : Dict = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(A )
expected_callbacks.remove(A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
_UpperCAmelCase : Optional[Any] = self.get_trainer()
_UpperCAmelCase : Any = trainer.pop_callback(A )
self.assertEqual(cb.__class__ , A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
trainer.add_callback(A )
expected_callbacks.insert(0 , A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
# We can also add, pop, or remove by instance
_UpperCAmelCase : Union[str, Any] = self.get_trainer()
_UpperCAmelCase : List[Any] = trainer.callback_handler.callbacks[0]
trainer.remove_callback(A )
expected_callbacks.remove(A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
_UpperCAmelCase : List[Any] = self.get_trainer()
_UpperCAmelCase : List[Any] = trainer.callback_handler.callbacks[0]
_UpperCAmelCase : Union[str, Any] = trainer.pop_callback(A )
self.assertEqual(A , A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
trainer.add_callback(A )
expected_callbacks.insert(0 , A )
self.check_callbacks_equality(trainer.callback_handler.callbacks , A )
def _A ( self : Optional[Any] ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="ignore" , category=A )
_UpperCAmelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
_UpperCAmelCase : Union[str, Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
# Independent log/save/eval
_UpperCAmelCase : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
_UpperCAmelCase : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
_UpperCAmelCase : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
_UpperCAmelCase : Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
_UpperCAmelCase : int = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="steps" )
trainer.train()
_UpperCAmelCase : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
_UpperCAmelCase : Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="epoch" )
trainer.train()
_UpperCAmelCase : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
# A bit of everything
_UpperCAmelCase : int = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="steps" , )
trainer.train()
_UpperCAmelCase : Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(A , self.get_expected_events(A ) )
# warning should be emitted for duplicated callbacks
with patch("transformers.trainer_callback.logger.warning" ) as warn_mock:
_UpperCAmelCase : Optional[Any] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(A ) in warn_mock.call_args[0][0]
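# A minimal sketch (hypothetical, outside the test harness above) of the callback
# pattern these tests exercise: subclass TrainerCallback and override a lifecycle hook.
# The `on_log` signature below matches the TrainerCallback API used throughout the file.
class LossLoggerCallback(TrainerCallback):
    """Collects every loss value the Trainer reports through `on_log`."""

    def __init__(self):
        self.losses = []

    def on_log(self, args, state, control, logs=None, **kwargs):
        if logs and "loss" in logs:
            self.losses.append(logs["loss"])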
| 31
|
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open( *args , **kwargs ):
            pass
def hashimage(image: Image ) -> str:
    '''simple docstring'''
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image ) -> Dict:
    '''simple docstring'''
    npimg = np.array(mask )
    shape = npimg.shape
    return {"hash": hashimage(mask ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __A ( unittest.TestCase ):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def get_test_pipeline( self , model , tokenizer , processor ):
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test( self , mask_generator , examples ):
        pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def lowercase__ ( self : Dict ):
pass
@slow
@require_torch
def lowercase__ ( self : str ):
        image_segmenter = pipeline('mask-generation' , model='facebook/sam-vit-huge' )

        outputs = image_segmenter('http://images.cocodataset.org/val2017/000000039769.jpg' , points_per_batch=256 )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks'] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
# fmt: off
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.04_44},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_21},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.01_67},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.01_32},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.00_53},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.99_67},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_93},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.99_09},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.98_79},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.98_34},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.97_16},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.96_12},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.95_99},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.95_52},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.95_32},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.95_16},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.94_99},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.94_83},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.94_64},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_43},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_43},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.94_08},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.93_35},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.93_26},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.92_62},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.89_99},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.89_86},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.89_84},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.88_73},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.88_71}
] , )
# fmt: on
@require_torch
@slow
def lowercase__ ( self : List[Any] ):
        model_id = 'facebook/sam-vit-huge'
        image_segmenter = pipeline('mask-generation' , model=model_id )

        outputs = image_segmenter(
            'http://images.cocodataset.org/val2017/000000039769.jpg' , pred_iou_thresh=1 , points_per_batch=256 )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs['masks'] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.04_44},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.02_10},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.01_67},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.01_32},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.00_53},
] , )
| 138
| 0
|
def merge_sort(collection):
    start, end = [], []
    while len(collection ) > 1:
        min_one, max_one = min(collection ), max(collection )
        start.append(min_one )
        end.append(max_one )
        collection.remove(min_one )
        collection.remove(max_one )
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(*merge_sort(unsorted), sep=''',''')
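# A quick sanity check (hypothetical, not part of the original script): each pass of the
# loop above peels the current minimum onto `start` and the current maximum onto `end`,
# so the halves recombine into sorted order around whatever remains in the middle.
assert merge_sort([5, 1, 4, 2, 3]) == [1, 2, 3, 4, 5]
assert merge_sort([]) == []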
| 367
|
from __future__ import annotations
def solution(maze: list[list[int]] ) -> bool:
    size = len(maze )
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size )] for _ in range(size )]
    solved = run_maze(maze , 0 , 0 , solutions )
    if solved:
        print('''\n'''.join(str(row ) for row in solutions ) )
    else:
        print('''No solution exists!''' )
    return solved


def run_maze(maze: list[list[int]] , i: int , j: int , solutions: list[list[int]] ) -> bool:
    size = len(maze )
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze , i + 1 , j , solutions )
                or run_maze(maze , i , j + 1 , solutions )
                or run_maze(maze , i - 1 , j , solutions )
                or run_maze(maze , i , j - 1 , solutions )
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
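if __name__ == "__main__":
    # A minimal usage sketch (hypothetical input): 0 marks an open cell and 1 a wall,
    # matching the `not maze[i][j]` check in run_maze above. The path runs from the
    # top-left corner to the bottom-right one.
    example_maze = [
        [0, 1, 0],
        [0, 0, 0],
        [1, 0, 0],
    ]
    solution(example_maze)  # prints the 0/1 visited-path matrix; returns True here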
| 167
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a =logging.get_logger(__name__)
a ={
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class ASTConfig( PretrainedConfig ):

    model_type = '''audio-spectrogram-transformer'''

    def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , patch_size=1_6 , qkv_bias=True , frequency_stride=1_0 , time_stride=1_0 , max_length=1_0_2_4 , num_mel_bins=1_2_8 , **kwargs ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
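if __name__ == "__main__":
    # A minimal usage sketch (hypothetical values, kept under a main guard so nothing
    # runs on import): any field left unset falls back to the defaults in __init__.
    demo_config = ASTConfig(num_mel_bins=128, frequency_stride=10)
    print(demo_config.hidden_size)  # 768, the default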
| 73
|
def present_value(discount_rate: float , cash_flows: list[float] ) -> float:
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative' )
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty' )
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows ) )
    return round(present_value , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
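# A hypothetical spot check: enumerate starts at i=0, so the first cash flow is
# undiscounted, giving 100 + 100/1.05 + 100/1.05**2 = 285.94 after rounding.
assert present_value(0.05, [100.0, 100.0, 100.0]) == 285.94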
| 73
| 1
|
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
_lock = threading.Lock()
_default_handler: Optional[logging.Handler] = None

log_levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}

_default_log_level = logging.WARNING

_tqdm_active = True


def _get_default_logging_level():
    env_level_str = os.getenv("""TRANSFORMERS_VERBOSITY""" , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
                f"""has to be one of: { ', '.join(log_levels.keys() ) }""" )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(""".""" )[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name() )


def _configure_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if _default_handler:
            # This library has already configured the library root logger.
            return
        _default_handler = logging.StreamHandler()  # Set sys.stderr as stream.
        _default_handler.flush = sys.stderr.flush

        # Apply our default configuration to the library root logger.
        library_root_logger = _get_library_root_logger()
        library_root_logger.addHandler(_default_handler )
        library_root_logger.setLevel(_get_default_logging_level() )
        library_root_logger.propagate = False


def _reset_library_root_logger() -> None:
    global _default_handler

    with _lock:
        if not _default_handler:
            return

        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler )
        library_root_logger.setLevel(logging.NOTSET )
        _default_handler = None


def get_log_levels_dict():
    return log_levels


def get_logger(name: Optional[str] = None ) -> logging.Logger:
    if name is None:
        name = _get_library_name()

    _configure_library_root_logger()
    return logging.getLogger(name )


def get_verbosity() -> int:
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int ) -> None:
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity )


def set_verbosity_info():
    return set_verbosity(INFO )


def set_verbosity_warning():
    return set_verbosity(WARNING )


def set_verbosity_debug():
    return set_verbosity(DEBUG )


def set_verbosity_error():
    return set_verbosity(ERROR )


def disable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler )


def enable_default_handler() -> None:
    _configure_library_root_logger()

    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler )


def add_handler(handler: logging.Handler ) -> None:
    _configure_library_root_logger()

    assert handler is not None
    _get_library_root_logger().addHandler(handler )


def remove_handler(handler: logging.Handler ) -> None:
    _configure_library_root_logger()

    assert handler is not None and handler not in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler )


def disable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True


def enable_explicit_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        formatter = logging.Formatter("""[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s""" )
        handler.setFormatter(formatter )


def reset_format() -> None:
    handlers = _get_library_root_logger().handlers

    for handler in handlers:
        handler.setFormatter(None )


def warning_advice(self , *args , **kwargs ):
    no_advisory_warnings = os.getenv("""TRANSFORMERS_NO_ADVISORY_WARNINGS""" , False )
    if no_advisory_warnings:
        return
    self.warning(*args , **kwargs )


logging.Logger.warning_advice = warning_advice


@functools.lru_cache(None )
def warning_once(self , *args , **kwargs ):
    self.warning(*args , **kwargs )


logging.Logger.warning_once = warning_once


class EmptyTqdm:
    '''simple docstring'''

    def __init__( self , *args , **kwargs ):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__( self ):
        return iter(self._iterator )

    def __getattr__( self , _ ):
        def empty_fn(*args , **kwargs ):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__( self ):
        return self

    def __exit__( self , type_ , value , traceback ):
        return


class _tqdm_cls:
    '''simple docstring'''

    def __call__( self , *args , **kwargs ):
        if _tqdm_active:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )

    def set_lock( self , *args , **kwargs ):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )

    def get_lock( self ):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active )


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
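if __name__ == "__main__":
    # A minimal usage sketch (hypothetical demo block, not part of the original module):
    # grab a namespaced logger and tune the library-wide verbosity, which can equally be
    # controlled through the TRANSFORMERS_VERBOSITY environment variable handled above.
    demo_logger = get_logger(__name__)
    set_verbosity(INFO)
    demo_logger.info('visible at INFO and below')
    disable_progress_bar()  # swaps tqdm for the no-op EmptyTqdm wrapper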
| 369
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
A_ : Optional[int] = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Optional[Any] = ['FNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Tuple = ['FNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[Any] = [
'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FNetForMaskedLM',
'FNetForMultipleChoice',
'FNetForNextSentencePrediction',
'FNetForPreTraining',
'FNetForQuestionAnswering',
'FNetForSequenceClassification',
'FNetForTokenClassification',
'FNetLayer',
'FNetModel',
'FNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
A_ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 141
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
a__ : Optional[Any] =logging.get_logger(__name__)
class DPTFeatureExtractor( DPTImageProcessor ):
    """simple docstring"""

    def __init__( self , *args , **kwargs ):
        warnings.warn(
            'The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DPTImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
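# A migration sketch per the deprecation warning above ('Intel/dpt-large' is one
# published DPT checkpoint, used here purely as an illustration):
#
#     image_processor = DPTImageProcessor.from_pretrained('Intel/dpt-large')
#
# DPTImageProcessor exposes the same preprocessing API this shim forwards to.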
| 53
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __snake_case :
__lowerCamelCase = XGLMConfig
__lowerCamelCase = {}
__lowerCamelCase = """gelu"""
def __init__( self , __UpperCamelCase , __UpperCamelCase=14 , __UpperCamelCase=7 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=99 , __UpperCamelCase=32 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=37 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=512 , __UpperCamelCase=0.0_2 , ) -> str:
'''simple docstring'''
snake_case__ : Any = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : List[str] = seq_length
snake_case__ : Optional[Any] = is_training
snake_case__ : Optional[int] = use_input_mask
snake_case__ : Any = use_labels
snake_case__ : List[str] = vocab_size
snake_case__ : List[Any] = d_model
snake_case__ : List[str] = num_hidden_layers
snake_case__ : Optional[int] = num_attention_heads
snake_case__ : str = ffn_dim
snake_case__ : Optional[Any] = activation_function
snake_case__ : str = activation_dropout
snake_case__ : int = attention_dropout
snake_case__ : List[str] = max_position_embeddings
snake_case__ : Optional[int] = initializer_range
snake_case__ : List[str] = None
snake_case__ : List[str] = 0
snake_case__ : Optional[int] = 2
snake_case__ : Union[str, Any] = 1
def __a ( self ) -> List[str]:
'''simple docstring'''
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def __a ( self ) -> Optional[int]:
'''simple docstring'''
snake_case__ : List[str] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
snake_case__ : int = None
if self.use_input_mask:
snake_case__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : List[Any] = self.get_config()
snake_case__ : Optional[int] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def __a ( self ) -> Any:
'''simple docstring'''
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=__UpperCamelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=__UpperCamelCase , )
def __a ( self ) -> Tuple:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()

        (config, input_ids, input_mask, head_mask) = config_and_inputs

        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class __snake_case ( _lowerCamelCase ,_lowerCamelCase ,unittest.TestCase ):
__lowerCamelCase = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
__lowerCamelCase = (TFXGLMForCausalLM,) if is_tf_available() else ()
__lowerCamelCase = (
{"""feature-extraction""": TFXGLMModel, """text-generation""": TFXGLMForCausalLM} if is_tf_available() else {}
)
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
def __a ( self ) -> str:
'''simple docstring'''
snake_case__ : Union[str, Any] = TFXGLMModelTester(self )
snake_case__ : Optional[int] = ConfigTester(self , config_class=__UpperCamelCase , n_embd=37 )
def __a ( self ) -> int:
'''simple docstring'''
self.config_tester.run_common_tests()
@slow
def __a ( self ) -> Dict:
'''simple docstring'''
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Any = TFXGLMModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def __a ( self ) -> Any:
'''simple docstring'''
super().test_resize_token_embeddings()
@require_tf
class __snake_case ( unittest.TestCase ):
@slow
def __a ( self , __UpperCamelCase=True ) -> int:
'''simple docstring'''
snake_case__ : Optional[Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        snake_case__ : Tuple = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.int32 )  # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
snake_case__ : List[str] = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
snake_case__ : int = model.generate(__UpperCamelCase , do_sample=__UpperCamelCase , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , __UpperCamelCase )
@slow
def __a ( self ) -> List[str]:
'''simple docstring'''
snake_case__ : Any = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
snake_case__ : Dict = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
snake_case__ : Any = tokenizer('Today is a nice day and' , return_tensors='tf' )
snake_case__ : Dict = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
snake_case__ : Optional[int] = model.generate(__UpperCamelCase , do_sample=__UpperCamelCase , seed=[7, 0] )
snake_case__ : int = tokenizer.decode(output_ids[0] , skip_special_tokens=__UpperCamelCase )
snake_case__ : str = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
@slow
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : str = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
snake_case__ : Optional[int] = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
snake_case__ : Any = 'left'
# use different length sentences to test batching
snake_case__ : int = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
snake_case__ : Any = tokenizer(__UpperCamelCase , return_tensors='tf' , padding=__UpperCamelCase )
snake_case__ : List[Any] = inputs['input_ids']
snake_case__ : List[str] = model.generate(input_ids=__UpperCamelCase , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
snake_case__ : Union[str, Any] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
snake_case__ : str = model.generate(input_ids=__UpperCamelCase , max_new_tokens=12 )
snake_case__ : int = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
snake_case__ : Dict = model.generate(input_ids=__UpperCamelCase , max_new_tokens=12 )
snake_case__ : List[Any] = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
snake_case__ : Optional[int] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__UpperCamelCase )
snake_case__ : Dict = tokenizer.decode(output_padded[0] , skip_special_tokens=__UpperCamelCase )
snake_case__ : Union[str, Any] = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(__UpperCamelCase , [non_padded_sentence, padded_sentence] )
| 143
| 0
|
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid ,source ,destination ,allow_diagonal ,) -> tuple[float | int, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols) ,np.inf )
    matrix[source] = 0
    predecessors = np.empty((rows, cols) ,dtype=object )
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue )
        if (x, y) in visited:
            continue
        visited.add((x, y) )
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y) )
                x, y = predecessors[x, y]
            path.append(source )  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx ) ):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue ,(dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
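if __name__ == "__main__":
    # A quick usage sketch (hypothetical grid): 1 marks a passable cell and 0 a wall,
    # matching the `next_node == 1` check above; every step costs 1.
    example_grid = np.array(
        [
            [1, 1, 1],
            [0, 0, 1],
            [1, 1, 1],
        ]
    )
    # The only route skirts the blocked middle row, giving distance 6.0.
    print(dijkstra(example_grid, (0, 0), (2, 0), allow_diagonal=False))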
| 354
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase : str = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : List[Any] = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Dict = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Any = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase : Union[str, Any] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 176
| 0
|
from __future__ import annotations
def median_of_two_arrays(nums1: list[float] , nums2: list[float] ) -> float:
    """simple docstring"""
    all_numbers = sorted(nums1 + nums2 )
    div, mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_a, array_b)}")
| 51
|
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase_ = '\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)["depth"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline("depth-estimation")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to("cuda")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to("cuda")\n\n\n >>> img = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/cat.png"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to("cuda")\n\n >>> prompt = "A robot, 4k photo"\n >>> negative_prior_prompt = "lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature"\n\n >>> generator = torch.Generator(device="cuda").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save("robot_cat.png")\n ```\n'
def downscale_height_and_width( height , width , scale_factor=8 ):
    '''simple docstring'''
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
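# Quick worked examples (hypothetical spot checks, kept under a main guard so they don't
# run on import): the function maps pixel sizes to latent sizes, dividing by
# scale_factor and rounding *up* to the next multiple of scale_factor (default 8).
if __name__ == "__main__":
    assert downscale_height_and_width(768, 768) == (96, 96)  # 768 / 8 exactly
    assert downscale_height_and_width(100, 100) == (16, 16)  # 12.5 rounded up to 16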
class KandinskyV22ControlnetPipeline( DiffusionPipeline ):
    def __init__( self , unet: UNetaDConditionModel , scheduler: DDPMScheduler , movq: VQModel , ):
        super().__init__()

        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowerCAmelCase__ ( self: int , UpperCamelCase_: Any , UpperCamelCase_: Tuple , UpperCamelCase_: int , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: int ):
if latents is None:
__lowerCamelCase = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ )
else:
if latents.shape != shape:
raise ValueError(F'Unexpected latents shape, got {latents.shape}, expected {shape}' )
__lowerCamelCase = latents.to(UpperCamelCase_ )
__lowerCamelCase = latents * scheduler.init_noise_sigma
return latents
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
__lowerCamelCase = torch.device(F'cuda:{gpu_id}' )
__lowerCamelCase = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: List[str] , UpperCamelCase_: Optional[int]=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
__lowerCamelCase = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=UpperCamelCase_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowerCamelCase = None
for cpu_offloaded_model in [self.unet, self.movq]:
__lowerCamelCase, __lowerCamelCase = cpu_offload_with_hook(UpperCamelCase_ , UpperCamelCase_ , prev_module_hook=UpperCamelCase_ )
# We'll offload the last model manually.
__lowerCamelCase = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowerCAmelCase__ ( self: int ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCamelCase_ )
def __call__( self: Tuple , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase_: torch.FloatTensor , UpperCamelCase_: int = 5_12 , UpperCamelCase_: int = 5_12 , UpperCamelCase_: int = 1_00 , UpperCamelCase_: float = 4.0 , UpperCamelCase_: int = 1 , UpperCamelCase_: Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_: Optional[torch.FloatTensor] = None , UpperCamelCase_: Optional[str] = "pil" , UpperCamelCase_: bool = True , ):
__lowerCamelCase = self._execution_device
__lowerCamelCase = guidance_scale > 1.0
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
__lowerCamelCase = torch.cat(UpperCamelCase_ , dim=0 )
__lowerCamelCase = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
__lowerCamelCase = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = negative_image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = hint.repeat_interleave(UpperCamelCase_ , dim=0 )
__lowerCamelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
__lowerCamelCase = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ )
self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ )
__lowerCamelCase = self.scheduler.timesteps
__lowerCamelCase = self.movq.config.latent_channels
__lowerCamelCase, __lowerCamelCase = downscale_height_and_width(UpperCamelCase_ , UpperCamelCase_ , self.movq_scale_factor )
# create initial latent
__lowerCamelCase = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ):
# expand the latents if we are doing classifier free guidance
__lowerCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCamelCase = {"""image_embeds""": image_embeds, """hint""": hint}
__lowerCamelCase = self.unet(
sample=UpperCamelCase_ , timestep=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , added_cond_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
if do_classifier_free_guidance:
__lowerCamelCase, __lowerCamelCase = noise_pred.split(latents.shape[1] , dim=1 )
__lowerCamelCase, __lowerCamelCase = noise_pred.chunk(2 )
__lowerCamelCase, __lowerCamelCase = variance_pred.chunk(2 )
__lowerCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowerCamelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowerCamelCase, __lowerCamelCase = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ , )[0]
# post-processing
__lowerCamelCase = self.movq.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
__lowerCamelCase = image * 0.5 + 0.5
__lowerCamelCase = image.clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(UpperCamelCase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase_ )
| 12
| 0
|
'''simple docstring'''
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length , remainder , digits , length )-> int:
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1 , -1 , -1 ):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10
        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10 ):
            digits[length // 2] = digit
            result += reversible_numbers(
                0 , (remainder + 2 * digit) // 10 , digits , length )
        return result

    result = 0
    for digit1 in range(10 ):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2 , (remainder + digit1 + digit2) // 10 , digits , length , )
    return result


def solution(max_power = 9 )-> int:
    result = 0
    for length in range(1 , max_power + 1 ):
        result += reversible_numbers(length , 0 , [0] * length , length )
    return result
if __name__ == "__main__":
print(f'{solution() = }')
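
The recursion is easy to sanity-check against a brute-force count for small lengths; a hedged sketch (the helper name is made up for the check):

def is_reversible(n: int) -> bool:
    # a number is reversible when it has no trailing zero (its reverse would have
    # a leading zero) and every digit of n + reverse(n) is odd
    if n % 10 == 0:
        return False
    total = n + int(str(n)[::-1])
    return all(int(d) % 2 == 1 for d in str(total))

# the Project Euler statement gives 120 reversible numbers below one thousand
assert sum(is_reversible(n) for n in range(1, 1000)) == 120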
| 183
|
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )

    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch`"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device

    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
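
The availability check above keys entirely off two environment variables that SageMaker sets; a minimal local sketch (the JSON payloads are illustrative):

import json
import os

os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2})
os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})

# Even with both variables set, this returns False unless the `smdistributed`
# module is importable, because of the final importlib check.
print(is_sagemaker_model_parallel_available())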
| 183
| 1
|
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def load_demo_image(image_size, device):
    img_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")

    transform = transforms.Compose(
        [
            transforms.Resize((image_size, image_size), interpolation=InterpolationMode.BICUBIC),
            transforms.ToTensor(),
            transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ]
    )
    image = transform(raw_image).unsqueeze(0).to(device)
    return image
def rename_key(key):
    if "visual_encoder" in key:
        key = re.sub("visual_encoder*", "vision_model.encoder", key)
    if "blocks" in key:
        key = re.sub(r"blocks", "layers", key)
    if "attn" in key:
        key = re.sub(r"attn", "self_attn", key)
    if "norm1" in key:
        key = re.sub(r"norm1", "layer_norm1", key)
    if "norm2" in key:
        key = re.sub(r"norm2", "layer_norm2", key)
    if "encoder.norm" in key:
        key = re.sub(r"encoder.norm", "post_layernorm", key)
    if "encoder.patch_embed.proj" in key:
        key = re.sub(r"encoder.patch_embed.proj", "embeddings.patch_embedding", key)
    if "encoder.pos_embed" in key:
        key = re.sub(r"encoder.pos_embed", "embeddings.position_embedding", key)
    if "encoder.cls_token" in key:
        key = re.sub(r"encoder.cls_token", "embeddings.class_embedding", key)
    if "self_attn" in key:
        key = re.sub(r"self_attn.proj", "self_attn.projection", key)
    return key
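
A quick illustration of how the substitutions compose on a single checkpoint key (the sample key is invented for the demo):

# a typical vision-tower key from the original BLIP state dict
print(rename_key("visual_encoder.blocks.0.attn.proj.weight"))
# -> vision_model.encoder.layers.0.self_attn.projection.weight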
@torch.no_grad()
def convert_blip_checkpoint(pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = BlipConfig.from_pretrained(config_path)
    else:
        config = BlipConfig(projection_dim=512, text_config={}, vision_config={})

    hf_model = BlipForConditionalGeneration(config).eval()

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"

    pt_model = blip_decoder(pretrained=model_url, image_size=384, vit="base")
    pt_model = pt_model.eval()

    modified_state_dict = pt_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_model.load_state_dict(modified_state_dict)

    image_size = 384
    image = load_demo_image(image_size=image_size, device="cpu")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    input_ids = tokenizer(["a picture of"]).input_ids

    out = hf_model.generate(image, input_ids)

    assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    out = hf_model.generate(image)

    assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]

    if pytorch_dump_folder_path is not None:
        hf_model.save_pretrained(pytorch_dump_folder_path)

    # model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
    model_url = (
        "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
    )

    vqa_model = blip_vqa(pretrained=model_url, image_size=image_size, vit="base")
    vqa_model.eval()

    modified_state_dict = vqa_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_vqa_model = BlipForQuestionAnswering(config)
    hf_vqa_model.load_state_dict(modified_state_dict)

    question = ["How many dogs are in this image?"]
    question_input_ids = tokenizer(question, return_tensors="pt").input_ids

    answer = hf_vqa_model.generate(question_input_ids, image)
    print(tokenizer.decode(answer[0]))

    assert tokenizer.decode(answer[0]) == "[UNK] 1 [SEP]"
    if pytorch_dump_folder_path is not None:
        hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa")

    model_url = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"

    itm_model = blip_itm(pretrained=model_url, image_size=image_size, vit="base")
    itm_model.eval()

    modified_state_dict = itm_model.state_dict()
    for key in modified_state_dict.copy():
        value = modified_state_dict.pop(key)
        renamed_key = rename_key(key)
        modified_state_dict[renamed_key] = value

    hf_itm_model = BlipForImageTextRetrieval(config)

    question = ["A picture of a woman with a dog sitting in a beach"]
    question_input_ids = tokenizer(
        question, return_tensors="pt", padding="max_length", truncation=True, max_length=35
    ).input_ids

    hf_itm_model.load_state_dict(modified_state_dict)
    hf_itm_model.eval()

    out_itm = hf_itm_model(question_input_ids, image, use_itm_head=True)
    out = hf_itm_model(question_input_ids, image, use_itm_head=False)

    assert out[0].item() == 0.2110687494277954
    assert torch.nn.functional.softmax(out_itm[0], dim=1)[:, 1].item() == 0.45698845386505127

    if pytorch_dump_folder_path is not None:
        hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
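
A hedged invocation sketch; the output directory is illustrative, and running it downloads the original BLIP checkpoints:

# equivalent CLI call (script name assumed to match the transformers conversion script):
#   python convert_blip_original_pytorch_to_hf.py --pytorch_dump_folder_path ./blip-base
convert_blip_checkpoint(pytorch_dump_folder_path="./blip-base", config_path=None)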
| 57
|
"""simple docstring"""
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text('''CPU''', font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text('''GPU''', font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text('''Model''', font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1), Create(cpu_right_col, run_time=1), Create(gpu_rect, run_time=1)
        )

        step_1 = MarkupText(
            F"""First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.""", font_size=24, )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""", font_size=18, )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3

            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 167
| 0
|
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."""
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
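
A hedged usage sketch for the helpers above, writing a tiny synthetic dataset to a temporary file (the feature schema is invented for the example):

import tempfile

features = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("float32")})
with tempfile.TemporaryDirectory() as tmp_dir:
    dataset = generate_example_dataset(f"{tmp_dir}/dummy.arrow", features, num_examples=10)
    print(len(dataset), dataset.features)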
| 350
|
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("""glue""", """mrpc""")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["""sentence1"""], examples["""sentence2"""], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["""idx""", """sentence1""", """sentence2"""], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("""label""", """labels""")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="""max_length""", max_length=128, return_tensors="""pt""")
        return tokenizer.pad(examples, padding="""longest""", return_tensors="""pt""")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["""train"""], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["""validation"""], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["""labels"""])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(predictions=predictions, references=references)

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["""lr"""]
    num_epochs = int(config["""num_epochs"""])
    seed = int(config["""seed"""])
    batch_size = int(config["""batch_size"""])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            """gradient_accumulation_steps"""
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("""glue""", """mrpc""")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("""epoch_""")[1]
        state_epoch_num = """"""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        accelerator.print("""resumed checkpoint performance:""", accuracy)
        accelerator.print("""resumed checkpoint's scheduler's lr:""", lr_scheduler.get_lr()[0])
        accelerator.print("""resumed optimizer's lr:""", optimizer.param_groups[0]["""lr"""])
        with open(os.path.join(args.output_dir, f"""state_{starting_epoch - 1}.json"""), """r""") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1
        output_dir = f"""epoch_{epoch}"""
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, model, eval_dataloader, metric)
        state["""accuracy"""] = accuracy
        state["""lr"""] = lr_scheduler.get_lr()[0]
        state["""optimizer_lr"""] = optimizer.param_groups[0]["""lr"""]
        state["""epoch"""] = epoch
        state["""step"""] = overall_step
        accelerator.print(f"""epoch {epoch}:""", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"""state_{epoch}.json"""), """w""") as f:
                json.dump(state, f)
def main():
    parser = argparse.ArgumentParser(description="""Simple example of training script tracking peak GPU memory usage.""")
    parser.add_argument(
        """--model_name_or_path""", type=str, default="""bert-base-cased""", help="""Path to pretrained model or model identifier from huggingface.co/models.""", required=False, )
    parser.add_argument(
        """--output_dir""", type=str, default=""".""", help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""", )
    parser.add_argument(
        """--resume_from_checkpoint""", type=str, default=None, help="""If the training should continue from a checkpoint folder.""", )
    parser.add_argument(
        """--partial_train_epoch""", type=int, default=None, help="""If passed, the training will stop after this number of epochs.""", )
    parser.add_argument(
        """--num_epochs""", type=int, default=2, help="""Number of train epochs.""", )
    args = parser.parse_args()
    config = {"""lr""": 2e-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 1
| 0
|
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """A CLIPTokenizer that can expand a single placeholder token into several vectors."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
                ''' `placeholder_token` that is not already in the tokenizer.''')

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"""_{i}"""
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"""The tokenizer already has placeholder token {token} that can get confused with"""
                    f""" {placeholder_token}keep placeholder tokens independent""")
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, ''' '''.join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load), *args, **kwargs, )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load), *args, **kwargs, )
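
A brief, hedged usage sketch: one placeholder expanded into several vectors, with prompts rewritten transparently at encode time (the model id and token name are illustrative):

tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=3)
# "<cat-toy>" is expanded to "<cat-toy>_0 <cat-toy>_1 <cat-toy>_2" before tokenization
input_ids = tokenizer("a photo of <cat-toy>", padding="max_length", truncation=True).input_ids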
| 129
|
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_5_0, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "pytorch",
"script": "run_ddp.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.7, "eval_loss": 0.6},
},
{
"framework": "tensorflow",
"script": "run_tf_dist.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.p3.16xlarge",
"results": {"train_runtime": 6_0_0, "eval_accuracy": 0.6, "eval_loss": 0.7},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split(), encoding='utf-8', check=True, )
        assert hasattr(self, 'env')

    def create_estimator(self, instance_count):
        job_name = f'''{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}'''
        # distributed data settings
        distribution = {'smdistributed': {'dataparallel': {'enabled': True}}} if self.script != 'run_ddp.py' else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=job_name, instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.distributed_hyperparameters, 'model_name_or_path': self.model_name_or_path}, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version='py36', )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''')

    @parameterized.expand([(2,)])
    def test_script(self, instance_count):
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get('TrainingTimeInSeconds', 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy)
        assert all(t <= self.results['eval_loss'] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f'''{estimator.latest_training_job.name}.json''', 'w') as outfile:
            json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss}, outfile)
| 141
| 0
|
'''simple docstring'''
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
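
This table maps bare package names to full pins; a small hedged sketch of how such a table is typically consumed when assembling requirement lists (the helper is illustrative):

def deps_list(*pkgs: str) -> list[str]:
    # look up the pinned requirement string for each bare package name
    return [deps[pkg] for pkg in pkgs]

print(deps_list("torch", "tokenizers"))
# ['torch>=1.9,!=1.12.0', 'tokenizers>=0.11.1,!=0.11.3,<0.14']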
| 18
|
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('''env''')
    else:
        parser = argparse.ArgumentParser('''Accelerate env command''')
    parser.add_argument(
        '''--config_file''', default=None, help='''The config file to use for the default values in the launching script.''')
    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser


def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = '''Not found'''
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()
    info = {
        '''`Accelerate` version''': version,
        '''Platform''': platform.platform(),
        '''Python version''': platform.python_version(),
        '''Numpy version''': np.__version__,
        '''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''',
        '''PyTorch XPU available''': str(pt_xpu_available),
        '''PyTorch NPU available''': str(pt_npu_available),
        '''System RAM''': F'''{psutil.virtual_memory().total / 1024 ** 3:.2f} GB''',
    }
    if pt_cuda_available:
        info['''GPU type'''] = torch.cuda.get_device_name()
    print('''\nCopy-and-paste the text below in your GitHub issue\n''')
    print('''\n'''.join([F'''- {prop}: {val}''' for prop, val in info.items()]))
    print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''')
    accelerate_config_str = (
        '''\n'''.join([F'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else F'''\t{accelerate_config}'''
    )
    print(accelerate_config_str)
    info['''`Accelerate` configs'''] = accelerate_config
    return info


def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
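
The same report can be produced programmatically; a hedged sketch (an empty argument list means no --config_file, so the default config path is probed):

parser = env_command_parser()
args = parser.parse_args([])
info = env_command(args)
print(sorted(info))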
| 18
| 1
|
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Perform linear search in the slice [left, right). Returns -1 if not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted list."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over a sorted list."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    collection = [int(item.strip()) for item in user_input.split(''',''')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('''Enter the number to be found in the list:\n''').strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result2 != -1:
        print(f'''Iterative search: {target} found at positions: {result1}''')
        print(f'''Recursive search: {target} found at positions: {result2}''')
    else:
        print('''Not found''')
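
A small non-interactive check of both implementations on arbitrary sorted data:

data = list(range(0, 100, 3))  # both searches require sorted input
assert ite_ternary_search(data, 42) == data.index(42)
assert rec_ternary_search(0, len(data) - 1, data, 42) == data.index(42)
assert ite_ternary_search(data, 43) == -1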
| 329
|
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
| 176
| 0
|
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
    is_boto3_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
    from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 368
|
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"


def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder('''>''')
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, '''Please use tf.data to implement this functionality.''')
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print('''Extracting''', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                '''Invalid magic number %d in MNIST image file: %s''' % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, '''Please use tf.one_hot on tensors.''')
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, '''Please use tf.data to implement this functionality.''')
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 numpy array [index]."""
    print('''Extracting''', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError(
                '''Invalid magic number %d in MNIST label file: %s''' % (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    @deprecated(
        None, '''Please use alternatives such as official/mnist/_DataSet.py'''
        ''' from tensorflow/models.''', )
    def __init__(self, images, labels, fake_data=False, one_hot=False, dtype=dtypes.float32, reshape=True, seed=None):
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('''Invalid image dtype %r, expected uint8 or float32''' % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'''images.shape: {images.shape} labels.shape: {labels.shape}'''
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, '''Please write your own downloading logic.''')
def _maybe_download(filename, work_directory, source_url):
    """Download the data from source url, unless it's already here."""
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print('''Successfully downloaded''', filename, size, '''bytes.''')
    return filepath
@deprecated(
__lowerCAmelCase , '''Please use alternatives such as:''' ''' tensorflow_datasets.load(\'mnist\')''' )
def __magic_name__ ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Optional[Any]=False , __lowerCAmelCase : Dict=False , __lowerCAmelCase : List[str]=dtypes.floataa , __lowerCAmelCase : Union[str, Any]=True , __lowerCAmelCase : int=5000 , __lowerCAmelCase : Any=None , __lowerCAmelCase : List[str]=DEFAULT_SOURCE_URL , ) -> Optional[Any]:
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=__lowerCAmelCase , one_hot=__lowerCAmelCase , dtype=__lowerCAmelCase , seed=__lowerCAmelCase )
__lowerCamelCase = fake()
__lowerCamelCase = fake()
__lowerCamelCase = fake()
return _Datasets(train=__lowerCAmelCase , validation=__lowerCAmelCase , test=__lowerCAmelCase )
if not source_url: # empty string check
__lowerCamelCase = DEFAULT_SOURCE_URL
__lowerCamelCase = '''train-images-idx3-ubyte.gz'''
__lowerCamelCase = '''train-labels-idx1-ubyte.gz'''
__lowerCamelCase = '''t10k-images-idx3-ubyte.gz'''
__lowerCamelCase = '''t10k-labels-idx1-ubyte.gz'''
__lowerCamelCase = _maybe_download(
__lowerCAmelCase , __lowerCAmelCase , source_url + train_images_file )
with gfile.Open(__lowerCAmelCase , '''rb''' ) as f:
__lowerCamelCase = _extract_images(__lowerCAmelCase )
__lowerCamelCase = _maybe_download(
__lowerCAmelCase , __lowerCAmelCase , source_url + train_labels_file )
with gfile.Open(__lowerCAmelCase , '''rb''' ) as f:
__lowerCamelCase = _extract_labels(__lowerCAmelCase , one_hot=__lowerCAmelCase )
__lowerCamelCase = _maybe_download(
__lowerCAmelCase , __lowerCAmelCase , source_url + test_images_file )
with gfile.Open(__lowerCAmelCase , '''rb''' ) as f:
__lowerCamelCase = _extract_images(__lowerCAmelCase )
__lowerCamelCase = _maybe_download(
__lowerCAmelCase , __lowerCAmelCase , source_url + test_labels_file )
with gfile.Open(__lowerCAmelCase , '''rb''' ) as f:
__lowerCamelCase = _extract_labels(__lowerCAmelCase , one_hot=__lowerCAmelCase )
if not 0 <= validation_size <= len(__lowerCAmelCase ):
__lowerCamelCase = (
'''Validation size should be between 0 and '''
f'''{len(__lowerCAmelCase )}. Received: {validation_size}.'''
)
raise ValueError(__lowerCAmelCase )
__lowerCamelCase = train_images[:validation_size]
__lowerCamelCase = train_labels[:validation_size]
__lowerCamelCase = train_images[validation_size:]
__lowerCamelCase = train_labels[validation_size:]
__lowerCamelCase = {'''dtype''': dtype, '''reshape''': reshape, '''seed''': seed}
__lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
__lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
__lowerCamelCase = _DataSet(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
return _Datasets(train=__lowerCAmelCase , validation=__lowerCAmelCase , test=__lowerCAmelCase )
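# Illustrative usage (a sketch; the cache directory below is a hypothetical path):
#     data = read_data_sets("/tmp/mnist_data", one_hot=True)
#     batch_images, batch_labels = data.train.next_batch(100)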
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    """Add quantization arguments to an argparse parser."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )


def set_default_quantizers(args):
    """Set the default quantizer descriptors; must run before the model is created."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)


def configure_model(model, args, calib=False, eval=False):
    """Enable or disable quantizers on the model according to args; called before the training loop."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)
        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)
        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)
        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)
        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)
        if args.recalibrate_weights:
            recalibrate_weights(model)
        if args.fuse_qkv:
            fuse_qkv(model, args)
        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)

    # if args.local_rank in [-1, 0] and not calib:
    print_quant_summary(model)


def enable_calibration(model):
    """Put every TensorQuantizer into calibration mode (quantization off, statistics collection on)."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Load the calibrated amax values and switch the quantizers back to quantization mode."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)


def fuse_qkv(model, args):
    """Share one scale factor across the Q, K and V projections of each self-attention block."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clamp the amax of the input quantizer that follows each GELU to maxval."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to per-channel, assigning every channel the per-tensor value."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    """Perform max calibration on the weights and update amax in place."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod.weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax


def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print one line per weighted layer: the layer name plus its activation and weight quantizer configs."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f'{" ":{name_width}} {wgt_str}')


def print_quant_summary(model):
    """Print a summary of every TensorQuantizer module found in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute k to v on mod's quantizer submodule (e.g. _input_quantizer)."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set attributes on the input and/or weight quantizers of mod."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Apply set_quantizers to every module whose name matches one of the given regexes."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
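# Typical calibration flow (a sketch, assuming an argparse parser and a model whose
# linear layers carry pytorch_quantization TensorQuantizer modules, e.g. QDQBERT):
#     add_arguments(parser); args = parser.parse_args()
#     set_default_quantizers(args)          # must run before the model is built
#     configure_model(model, args, calib=True)
#     enable_calibration(model)
#     # ... run a few forward passes over calibration data ...
#     finish_calibration(model, args)
#     configure_model(model, args, calib=False)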
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048


class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs


def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor


def get_mmimdb_labels():
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]


def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
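# Sketch of how these pieces fit together (the jsonl path below is hypothetical):
#     labels = get_mmimdb_labels()
#     dataset = JsonlDataset("/data/mmimdb/train.jsonl", tokenizer, get_image_transforms(), labels, max_seq_length=510)
#     loader = torch.utils.data.DataLoader(dataset, batch_size=8, collate_fn=collate_fn)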
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesn't
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
from __future__ import annotations
import pandas as pd
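# Preemptive shortest-job-first (shortest-remaining-time-first) scheduling:
# at every time unit the CPU goes to the arrived process with the smallest
# remaining burst time, preempting the running process if a shorter one arrives.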
def calculate_waitingtime(arrival_time, burst_time, no_of_processes):
    """Calculate the waiting time of each process under preemptive SJF."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time):
    """Turnaround time is burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time, turn_around_time, no_of_processes):
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)

    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
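# Worked example: with arrival times [0, 0, 0] and burst times [2, 1, 3], SRTF
# runs P2 first (waiting 0), then P1 (waiting 1), then P3 (waiting 3), so the
# average waiting time is (0 + 1 + 3) / 3 = 1.33333.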
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def snake_case_ ( _lowerCAmelCase : List[str] ) -> str:
def wrapper(*_lowerCAmelCase : str , **_lowerCAmelCase : Tuple ):
UpperCAmelCase : int = timeit.default_timer()
UpperCAmelCase : int = func(*_lowerCAmelCase , **_lowerCAmelCase )
UpperCAmelCase : Dict = timeit.default_timer() - starttime
return delta
UpperCAmelCase : List[str] = func.__name__
return wrapper
def snake_case_ ( _lowerCAmelCase : dict , _lowerCAmelCase : Any=100 , _lowerCAmelCase : Optional[int]=None ) -> Optional[int]:
UpperCAmelCase : Tuple = []
UpperCAmelCase : Optional[Any] = seq_shapes or {}
for i in range(_lowerCAmelCase ):
UpperCAmelCase : List[str] = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(_lowerCAmelCase , _ArrayXD ):
UpperCAmelCase : List[str] = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(_lowerCAmelCase , datasets.Value ):
if v.dtype == "string":
UpperCAmelCase : Any = '''The small grey turtle was surprisingly fast when challenged.'''
else:
UpperCAmelCase : List[Any] = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(_lowerCAmelCase , datasets.Sequence ):
while isinstance(_lowerCAmelCase , datasets.Sequence ):
UpperCAmelCase : Union[str, Any] = v.feature
UpperCAmelCase : Union[str, Any] = seq_shapes[k]
UpperCAmelCase : List[str] = np.random.rand(*_lowerCAmelCase ).astype(v.dtype )
UpperCAmelCase : List[Any] = data
dummy_data.append((i, example) )
return dummy_data
def snake_case_ ( _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : List[Any]=100 , _lowerCAmelCase : Dict=None ) -> Union[str, Any]:
UpperCAmelCase : List[str] = generate_examples(_lowerCAmelCase , num_examples=_lowerCAmelCase , seq_shapes=_lowerCAmelCase )
with ArrowWriter(features=_lowerCAmelCase , path=_lowerCAmelCase ) as writer:
for key, record in dummy_data:
UpperCAmelCase : Union[str, Any] = features.encode_example(_lowerCAmelCase )
writer.write(_lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase : int = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
UpperCAmelCase : List[str] = datasets.Dataset.from_file(filename=_lowerCAmelCase , info=datasets.DatasetInfo(features=_lowerCAmelCase ) )
return dataset
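# Illustrative usage (the output path below is a hypothetical temp file):
#     features = datasets.Features({"text": datasets.Value("string")})
#     dataset = generate_example_dataset("/tmp/bench.arrow", features, num_examples=100)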
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int:
'''simple docstring'''
return x if y == 0 else greatest_common_divisor(snake_case_ , x % y )
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : int ) -> int:
'''simple docstring'''
return (x * y) // greatest_common_divisor(snake_case_ , snake_case_ )
def lowerCAmelCase_ ( snake_case_ : int = 20 ) -> int:
'''simple docstring'''
UpperCAmelCase_ = 1
for i in range(1 , n + 1 ):
UpperCAmelCase_ = lcm(snake_case_ , snake_case_ )
return g
if __name__ == "__main__":
print(f"{solution() = }")
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
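# Note: ds.map with num_proc > 1 runs `tokenize` in worker processes, so the
# function (and the tokenizer it closes over) must be picklable.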
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"


class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
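# Sketch (needs the sentencepiece package and access to the Hugging Face Hub):
#     tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#     ids = tokenizer("Transformers est génial !").input_ids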
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count the ordered ways to pick elements of `array` (with repetition) that sum to `target`."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Same count, memoized on the intermediate targets."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Iterative bottom-up variant of the same count."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
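# Prints 9: 5 = 1+1+1+1+1, 1+1+1+2 (4 orderings), 1+2+2 (3 orderings), or 5 itself.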
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
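# The @slow integration test above downloads squeezebert/squeezebert-mnli from the
# Hub; by transformers test-suite convention such tests only run when RUN_SLOW=1.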
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path (as a list of nodes) between start and goal."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest path distance (number of edges) between start and target."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
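# The two traversals differ in what they track: bfs_shortest_path keeps whole
# paths in the queue (memory-heavy but returns the route), while
# bfs_shortest_path_distance keeps only per-node distances (returns the length).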
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def __lowerCamelCase ( __a :List[str] ) -> Tuple:
"""simple docstring"""
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def __lowerCamelCase ( __a :int ) -> Optional[int]:
"""simple docstring"""
A__ = create_tensor(__a )
A__ = gather(__a )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def __lowerCamelCase ( __a :Any ) -> Any:
"""simple docstring"""
A__ = [state.process_index]
A__ = gather_object(__a )
assert len(__a ) == state.num_processes, F'{gathered_obj}, {len(__a )} != {state.num_processes}'
assert gathered_obj == list(range(state.num_processes ) ), F'{gathered_obj} != {list(range(state.num_processes ) )}'
def __lowerCamelCase ( __a :List[str] ) -> List[str]:
"""simple docstring"""
A__ = create_tensor(__a )
A__ = broadcast(__a )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def __lowerCamelCase ( __a :Any ) -> Any:
"""simple docstring"""
if state.is_main_process:
A__ = torch.arange(state.num_processes + 1 ).to(state.device )
else:
A__ = torch.arange(state.num_processes ).to(state.device )
A__ = pad_across_processes(__a )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def __lowerCamelCase ( __a :Union[str, Any] ) -> str:
"""simple docstring"""
if state.num_processes != 2:
return
A__ = create_tensor(__a )
A__ = reduce(__a , """sum""" )
A__ = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(__a , __a ), F'{reduced_tensor} != {truth_tensor}'
def __lowerCamelCase ( __a :List[Any] ) -> List[str]:
"""simple docstring"""
if state.num_processes != 2:
return
A__ = create_tensor(__a )
A__ = reduce(__a , """mean""" )
A__ = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(__a , __a ), F'{reduced_tensor} != {truth_tensor}'
def __lowerCamelCase ( __a :str ) -> Optional[int]:
"""simple docstring"""
main()
def __lowerCamelCase ( ) -> str:
"""simple docstring"""
A__ = PartialState()
state.print(F'State: {state}' )
state.print("""testing gather""" )
test_gather(__a )
state.print("""testing gather_object""" )
test_gather_object(__a )
state.print("""testing broadcast""" )
test_broadcast(__a )
state.print("""testing pad_across_processes""" )
test_pad_across_processes(__a )
state.print("""testing reduce_sum""" )
test_reduce_sum(__a )
state.print("""testing reduce_mean""" )
test_reduce_mean(__a )
if __name__ == "__main__":
main()
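# Assumption: a standard multi-process accelerate setup, e.g. started with
# `accelerate launch <this script>`, so PartialState gives each copy of the
# script a distinct process_index and the collectives have peers to talk to.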
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class _SCREAMING_SNAKE_CASE ( A__ ):
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :str = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def __lowerCAmelCase ( self ) -> str:
with self.assertRaises(__A ):
lowerCAmelCase_ :Any = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def __lowerCAmelCase ( self ) -> str:
with self.assertRaises(__A ):
lowerCAmelCase_ :Any = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Tuple = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __lowerCAmelCase ( self ) -> Dict:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
lowerCAmelCase_ :Optional[int] = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Any = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :int = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Optional[int] = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
lowerCAmelCase_ :List[Any] = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :str = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :Union[str, Any] = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def __lowerCAmelCase ( self ) -> Optional[Any]:
import PIL.Image
lowerCAmelCase_ :Dict = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=__A ) as mock_cast_to_python_objects:
lowerCAmelCase_ :Dict = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) )
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , __A )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : int ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ :str = pa.BufferReader(lowercase__ ) if isinstance(lowercase__ , pa.Buffer ) else pa.memory_map(lowercase__ )
lowerCAmelCase_ :Any = pa.ipc.open_stream(lowercase__ )
lowerCAmelCase_ :pa.Table = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _snake_case ( lowercase__ : int , lowercase__ : str ) -> Any:
'''simple docstring'''
lowerCAmelCase_ :Tuple = pa.BufferOutputStream()
lowerCAmelCase_ :Any = pa.schema(lowercase__ ) if fields else None
with ArrowWriter(stream=lowercase__ , schema=lowercase__ , writer_batch_size=lowercase__ ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
lowerCAmelCase_ , lowerCAmelCase_ :Any = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ :str = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(lowercase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )


def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
def _snake_case ( lowercase__ : Any ) -> str:
'''simple docstring'''
lowerCAmelCase_ :str = pa.BufferOutputStream()
with ArrowWriter(
stream=lowercase__ , writer_batch_size=lowercase__ , hash_salt="""split_name""" , check_duplicates=lowercase__ , ) as writer:
with pytest.raises(lowercase__ ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
lowerCAmelCase_ , lowerCAmelCase_ :Dict = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _snake_case ( lowercase__ : Dict ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase_ :List[str] = pa.BufferOutputStream()
with ArrowWriter(
stream=lowercase__ , writer_batch_size=lowercase__ , hash_salt="""split_name""" , check_duplicates=lowercase__ , ) as writer:
with pytest.raises(lowercase__ ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1_0 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=1_0 )
lowerCAmelCase_ , lowerCAmelCase_ :int = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _snake_case ( lowercase__ : Tuple ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = pa.BufferOutputStream()
with ArrowWriter(
stream=lowercase__ , writer_batch_size=lowercase__ , hash_salt="""split_name""" , check_duplicates=lowercase__ , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
lowerCAmelCase_ , lowerCAmelCase_ :Union[str, Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _snake_case ( lowercase__ : List[str] , lowercase__ : List[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :Dict = pa.BufferOutputStream()
lowerCAmelCase_ :str = pa.schema(lowercase__ ) if fields else None
with ArrowWriter(stream=lowercase__ , schema=lowercase__ , writer_batch_size=lowercase__ ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
lowerCAmelCase_ , lowerCAmelCase_ :int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ :Optional[Any] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(lowercase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _snake_case ( lowercase__ : str , lowercase__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :List[Any] = pa.BufferOutputStream()
lowerCAmelCase_ :Optional[Any] = pa.schema(lowercase__ ) if fields else None
with ArrowWriter(stream=lowercase__ , schema=lowercase__ , writer_batch_size=lowercase__ ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
lowerCAmelCase_ , lowerCAmelCase_ :int = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ :Dict = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(lowercase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _snake_case ( lowercase__ : Optional[int] , lowercase__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ :int = pa.BufferOutputStream()
lowerCAmelCase_ :Dict = pa.schema(lowercase__ ) if fields else None
with ArrowWriter(stream=lowercase__ , schema=lowercase__ , writer_batch_size=lowercase__ ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
lowerCAmelCase_ :str = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(lowercase__ , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )


def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)


def get_base_dtype(arr_type):
    # Recurse through (nested) list types down to the primitive value type.
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    # Recurse through (nested) lists and overwrite the first primitive element.
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _snake_case ( lowercase__ : Dict , lowercase__ : Tuple , lowercase__ : Dict ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ :Optional[Any] = pa.array(TypedSequence(lowercase__ , optimized_int_type=lowercase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _snake_case ( lowercase__ : int , lowercase__ : List[str] , lowercase__ : int ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ :Tuple = pa.array(OptimizedTypedSequence(lowercase__ , col=lowercase__ ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
lowerCAmelCase_ :int = copy.deepcopy(lowercase__ )
lowerCAmelCase_ :Tuple = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(lowercase__ , lowercase__ )
lowerCAmelCase_ :Optional[Any] = pa.array(OptimizedTypedSequence(lowercase__ , col=lowercase__ ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def _snake_case ( lowercase__ : int , lowercase__ : Tuple ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ :Union[str, Any] = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=lowercase__ ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _snake_case ( lowercase__ : Optional[Any] ) -> int:
'''simple docstring'''
lowerCAmelCase_ :Tuple = """mock://dataset-train.arrow"""
with ArrowWriter(path=lowercase__ , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(lowercase__ ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
lowerCAmelCase_ , lowerCAmelCase_ :str = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(lowercase__ )


def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}


@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None


def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
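

# Illustrative sketch (not part of the upstream test file): the minimal
# write -> finalize -> read-back cycle that the tests above verify piecewise,
# using only APIs already exercised in this module.
def _arrow_writer_roundtrip_demo():
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()  # -> (2, <bytes written>)
    table = pa.ipc.open_stream(pa.BufferReader(output.getvalue())).read_all()
    assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}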

# ============================== next source file ==============================

from typing import List

import datasets
from datasets.tasks import AudioClassification

from ..folder_based_builder import folder_based_builder


logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")

AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]

AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
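

# Illustrative usage note (not part of the upstream module): AudioFolder is the
# packaged builder behind `load_dataset("audiofolder", ...)`. Assuming a local
# layout of <data_dir>/<class_name>/<clip>.wav, labels are inferred from the
# directory names (the path below is hypothetical):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("audiofolder", data_dir="path/to/folder")
#     ds["train"][0]  # {"audio": {"path": ..., "array": ..., "sampling_rate": ...}, "label": ...}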

# ============================== next source file ==============================

"""simple docstring"""
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class A__ ( unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_reduce_labels=False,
    ):
        size = size if size is not None else {"height": 20, "width": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_reduce_labels = do_reduce_labels

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_reduce_labels": self.do_reduce_labels,
        }


def prepare_semantic_single_inputs():
    dataset = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image = Image.open(dataset[0]["file"])
    map = Image.open(dataset[1]["file"])

    return image, map


def prepare_semantic_batch_inputs():
    ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")

    image1 = Image.open(ds[0]["file"])
    map1 = Image.open(ds[1]["file"])
    image2 = Image.open(ds[2]["file"])
    map2 = Image.open(ds[3]["file"])

    return [image1, image2], [map1, map2]


@require_torch
@require_vision
class BeitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BeitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BeitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 20, "width": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})
        self.assertEqual(image_processor.do_reduce_labels, False)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, crop_size=84, reduce_labels=True
        )
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
        self.assertEqual(image_processor.do_reduce_labels, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_segmentation_maps(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        maps = []
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
            maps.append(torch.zeros(image.shape[-2:]).long())

        # Test not batched input
        encoding = image_processing(image_inputs[0], maps[0], return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched
        encoding = image_processing(image_inputs, maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test not batched input (PIL images)
        image, segmentation_map = prepare_semantic_single_inputs()
        encoding = image_processing(image, segmentation_map, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                1,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

        # Test batched input (PIL images)
        images, segmentation_maps = prepare_semantic_batch_inputs()
        encoding = image_processing(images, segmentation_maps, return_tensors="pt")
        self.assertEqual(
            encoding["pixel_values"].shape,
            (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(
            encoding["labels"].shape,
            (
                2,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
        self.assertEqual(encoding["labels"].dtype, torch.long)
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)

    def test_reduce_labels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)

        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        image, map = prepare_semantic_single_inputs()
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 150)

        image_processing.do_reduce_labels = True
        encoding = image_processing(image, map, return_tensors="pt")
        self.assertTrue(encoding["labels"].min().item() >= 0)
        self.assertTrue(encoding["labels"].max().item() <= 255)
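

# Illustrative sketch (not part of the upstream test file): the call pattern the
# tests above exercise, collected in one place. The helper name
# `_beit_image_processor_demo` is ours; everything else is the API used above.
# When a segmentation map is passed alongside the image, the processor returns
# integer "labels" next to "pixel_values".
def _beit_image_processor_demo():
    image_processing = BeitImageProcessor(do_reduce_labels=False)
    image, seg_map = prepare_semantic_single_inputs()
    encoding = image_processing(image, seg_map, return_tensors="pt")
    return encoding["pixel_values"].shape, encoding["labels"].shape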

# ============================== next source file ==============================

"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Optional[int] = ShapEPipeline
A_ : str = ['prompt']
A_ : Any = ['prompt']
A_ : List[Any] = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
A_ : Optional[int] = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
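

# Illustrative usage note (not part of the upstream test file): the end-to-end
# call mirrored by the integration test above, left as a comment because it
# downloads the full `openai/shap-e` checkpoint and needs a GPU:
#
#     pipe = ShapEPipeline.from_pretrained("openai/shap-e").to("cuda")
#     images = pipe(
#         "a shark",
#         guidance_scale=15.0,
#         num_inference_steps=64,
#         frame_size=64,
#         output_type="np",
#     ).images[0]
#     # images: a (20, 64, 64, 3) array of rendered views of the generated 3D asset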