| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    """Relax the edges leaving v and update the best known meeting distance."""
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(
    source: str, destination: str, graph_forward: dict, graph_backward: dict
) -> int:
    """
    Bi-directional Dijkstra's algorithm.

    Returns:
        shortest_path_distance (int): length of the shortest path.

    Warnings:
        If the destination is not reachable, the function returns -1.

    >>> bidirectional_dij("E", "F", graph_fwd, graph_bwd)
    3
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        # stop once the frontiers cross: no shorter meeting point can exist
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
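For a concrete run, the sample graphs above give a shortest E-to-F distance of 3: E → G → F costs 2 + 1, which beats E → B → C → D → F at cost 4.

```python
print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3
```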
---
"""simple docstring"""
import os
def __UpperCAmelCase ( ) -> int:
with open(os.path.dirname(__lowerCamelCase ) + '''/p022_names.txt''' ) as file:
lowercase__ : List[Any] = str(file.readlines()[0] )
lowercase__ : Dict = names.replace('''"''' , '''''' ).split(''',''' )
names.sort()
lowercase__ : int = 0
lowercase__ : Optional[Any] = 0
for i, name in enumerate(__lowerCamelCase ):
for letter in name:
name_score += ord(__lowerCamelCase ) - 64
total_score += (i + 1) * name_score
lowercase__ : List[str] = 0
return total_score
if __name__ == "__main__":
print(solution())
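A quick check of the scoring rule against the example in the problem statement: COLIN is worth 3 + 15 + 12 + 9 + 14 = 53 and, as the 938th name, scores 938 × 53 = 49714.

```python
colin_value = sum(ord(letter) - 64 for letter in "COLIN")
assert colin_value == 53
assert 938 * colin_value == 49714
```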
---
import gc
import random
import unittest

import torch

from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin


@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)


def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
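Outside the test harness, the same two-stage pattern looks roughly like the sketch below. This is a minimal sketch assuming access to the gated DeepFloyd checkpoints; the argument names follow the calls in the test above.

```python
import torch
from diffusers import IFPipeline, IFSuperResolutionPipeline

stage_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
stage_1.enable_model_cpu_offload()

# precompute the T5 embeddings once, as the slow test does
prompt_embeds, negative_embeds = stage_1.encode_prompt("anime turtle")
image = stage_1(prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pt").images

stage_2 = IFSuperResolutionPipeline.from_pretrained(
    "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None
)
stage_2.enable_model_cpu_offload()
upscaled = stage_2(
    image=image, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_embeds, output_type="pil"
).images[0]
```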
---
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_encoder_decoder import EncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_encoder_decoder import EncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_encoder_decoder import TFEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
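The effect of the `_LazyModule` indirection is that submodules are imported only when their attributes are first touched, which keeps `import transformers` cheap. A small illustration (framework-specific attributes raise if the corresponding backend is missing):

```python
import transformers.models.encoder_decoder as encoder_decoder

config_cls = encoder_decoder.EncoderDecoderConfig  # imports configuration_encoder_decoder on first access
model_cls = encoder_decoder.EncoderDecoderModel    # imports modeling_encoder_decoder (requires torch)
```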
---
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class a ( __SCREAMING_SNAKE_CASE ):
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : pyspark.sql.DataFrame , __SCREAMING_SNAKE_CASE : Optional[NamedSplit] = None , __SCREAMING_SNAKE_CASE : Optional[Features] = None , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : bool = False , __SCREAMING_SNAKE_CASE : str = None , __SCREAMING_SNAKE_CASE : bool = True , __SCREAMING_SNAKE_CASE : str = "arrow" , **__SCREAMING_SNAKE_CASE : List[Any] , ) -> str:
super().__init__(
split=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE , streaming=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
lowerCamelCase_ = load_from_cache_file
lowerCamelCase_ = file_format
lowerCamelCase_ = Spark(
df=__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , working_dir=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE , )
def UpperCamelCase ( self : Optional[Any] ) -> int:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowerCamelCase_ = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__SCREAMING_SNAKE_CASE , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
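In user code this reader is normally reached through `Dataset.from_spark`, available in recent `datasets` releases; a minimal local sketch:

```python
from pyspark.sql import SparkSession

from datasets import Dataset

spark = SparkSession.builder.master("local[*]").getOrCreate()
df = spark.createDataFrame([("hello",), ("world",)], schema=["text"])

ds = Dataset.from_spark(df)  # on a real cluster, cache_dir must be visible to driver and executors
print(ds[0])                 # {'text': 'hello'}
```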
---
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}


class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
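A small check of the surface this file defines; the import path for the ONNX config is assumed from the module layout above:

```python
from transformers import YolosConfig
from transformers.models.yolos.configuration_yolos import YolosOnnxConfig

config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
onnx_config = YolosOnnxConfig(config)

print(list(onnx_config.inputs))         # ['pixel_values']
print(onnx_config.atol_for_validation)  # 1e-4
print(onnx_config.default_onnx_opset)   # 12
```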
---
import shutil
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_tf_cross_test,
    require_tf,
    require_torch,
    require_torchvision,
    require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, SamImageProcessor, SamProcessor

if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))


@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )


@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )

        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
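The shape contract these tests pin down: `post_process_masks` resamples low-resolution mask logits back to each image's original size. A hedged sketch against a real checkpoint (`facebook/sam-vit-base` is an assumption, not part of the tests above):

```python
import torch
from transformers import SamProcessor

processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
low_res_masks = [torch.ones((1, 3, 256, 256))]

masks = processor.post_process_masks(
    low_res_masks, original_sizes=[[1764, 2646]], reshaped_input_sizes=[[683, 1024]]
)
print(masks[0].shape)  # torch.Size([1, 3, 1764, 2646])
```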
---
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()
    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()
    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
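The trick that ends the server's read loop is the `side_effect` iterator: successive `read` calls yield `1` and then `None`, the falsy sentinel `send_file` stops on. In isolation:

```python
from unittest.mock import Mock

read = Mock(side_effect=[1, None])
assert read() == 1      # first chunk is sent
assert read() is None   # falsy sentinel ends the loop
```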
---
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path

import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized

from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput

if is_torch_available():
    from transformers import Wav2Vec2ForCTC


@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)

    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits,
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)

    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")

        processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both decoder from hub and local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])

    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
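A hedged sketch of the pooled decoding pattern the batch tests exercise. As the comment in `test_decoder_batch` notes, the pool must be created after the processor so forked workers can see the LM; the dummy logits are shaped like the tests' `_get_dummy_logits`.

```python
from multiprocessing import get_context

import numpy as np
from transformers import Wav2Vec2ProcessorWithLM

processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
logits = np.random.rand(2, 10, 16)  # (batch, time, vocab), matching the decoder's alphabet size

with get_context("fork").Pool() as pool:
    transcriptions = processor.batch_decode(logits, pool).text
print(transcriptions)
```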
---
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class _UpperCAmelCase :
def __init__( self : Union[str, Any] , _lowercase : Optional[Any] ):
__UpperCAmelCase = str(id_ )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = []
__UpperCAmelCase = {} # {vertex:distance}
def __lt__( self : str , _lowercase : List[Any] ):
return self.key < other.key
def __repr__( self : int ):
return self.id
def a ( self : Union[str, Any] , _lowercase : int ):
self.neighbors.append(_lowercase )
def a ( self : List[Any] , _lowercase : Optional[Any] , _lowercase : int ):
__UpperCAmelCase = weight
def lowercase__ ( snake_case_ :int , snake_case_ :Any , snake_case_ :Union[str, Any] , snake_case_ :List[str] ):
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , snake_case_ )
graph[b - 1].add_edge(graph[a - 1] , snake_case_ )
def lowercase__ ( snake_case_ :list , snake_case_ :Vertex ):
__UpperCAmelCase = []
for u in graph:
__UpperCAmelCase = math.inf
__UpperCAmelCase = None
__UpperCAmelCase = 0
__UpperCAmelCase = graph[:]
while q:
__UpperCAmelCase = min(snake_case_ )
q.remove(snake_case_ )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
__UpperCAmelCase = u
__UpperCAmelCase = u.edges[v.id]
for i in range(1 , len(snake_case_ ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def lowercase__ ( snake_case_ :list , snake_case_ :Vertex ):
for u in graph:
__UpperCAmelCase = math.inf
__UpperCAmelCase = None
__UpperCAmelCase = 0
__UpperCAmelCase = list(snake_case_ )
hq.heapify(snake_case_ )
while h:
__UpperCAmelCase = hq.heappop(snake_case_ )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
__UpperCAmelCase = u
__UpperCAmelCase = u.edges[v.id]
hq.heapify(snake_case_ )
for i in range(1 , len(snake_case_ ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def lowercase__ ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
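A small demo of both variants, using the edge set from the upstream doctest; both report the same minimum spanning tree as (vertex, parent) pairs rooted at vertex 1:

```python
G = [Vertex(n) for n in range(5)]
connect(G, 1, 2, 15)
connect(G, 1, 3, 12)
connect(G, 2, 4, 13)
connect(G, 3, 4, 12)
connect(G, 1, 5, 1)
connect(G, 5, 2, 1)

print(prim(G, G[0]))             # [(2, 5), (3, 1), (4, 3), (5, 1)]
print(list(prim_heap(G, G[0])))  # [(2, 5), (3, 1), (4, 3), (5, 1)]
```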
---
"""simple docstring"""
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) -> np.ndarray:
"""simple docstring"""
_UpperCamelCase = cva.getAffineTransform(__snake_case, __snake_case )
return cva.warpAffine(__snake_case, __snake_case, (rows, cols) )
if __name__ == "__main__":
# read original image
_a = cva.imread(
str(Path(__file__).resolve().parent.parent / """image_data""" / """lena.jpg""")
)
# turn image in gray scale value
_a = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
_a , _a = gray_img.shape
# set different points to rotate image
_a = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
_a = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
_a = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
_a = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
_a = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
_a = plt.figure(1)
_a = ["""Original""", """Rotation 1""", """Rotation 2""", """Rotation 3"""]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, """gray""")
plt.title(titles[i])
plt.axis("""off""")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
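The three source/destination point pairs fully determine the 2×3 affine matrix; to inspect it without plotting:

```python
import cv2
import numpy as np

pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)

M = cv2.getAffineTransform(pts1, pts2)
print(M.shape)  # (2, 3): linear part on the left, translation column on the right
```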
---
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _UpperCAmelCase( lowerCamelCase ):
def __init__( self , __a , __a , __a , __a = None , ) -> List[str]:
'''simple docstring'''
super().__init__()
self.register_modules(transformer=__a , vae=__a , scheduler=__a)
# create a imagenet -> id dictionary for easier use
_UpperCamelCase = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(''','''):
_UpperCamelCase = int(__a)
_UpperCamelCase = dict(sorted(self.labels.items()))
def UpperCAmelCase ( self , __a) -> List[int]:
'''simple docstring'''
if not isinstance(__a , __a):
_UpperCamelCase = list(__a)
for l in label:
if l not in self.labels:
raise ValueError(
F'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''')
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self , __a , __a = 4.0 , __a = None , __a = 50 , __a = "pil" , __a = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
_UpperCamelCase = len(__a)
_UpperCamelCase = self.transformer.config.sample_size
_UpperCamelCase = self.transformer.config.in_channels
_UpperCamelCase = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=__a , device=self.device , dtype=self.transformer.dtype , )
_UpperCamelCase = torch.cat([latents] * 2) if guidance_scale > 1 else latents
_UpperCamelCase = torch.tensor(__a , device=self.device).reshape(-1)
_UpperCamelCase = torch.tensor([10_00] * batch_size , device=self.device)
_UpperCamelCase = torch.cat([class_labels, class_null] , 0) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(__a)
for t in self.progress_bar(self.scheduler.timesteps):
if guidance_scale > 1:
_UpperCamelCase = latent_model_input[: len(__a) // 2]
_UpperCamelCase = torch.cat([half, half] , dim=0)
_UpperCamelCase = self.scheduler.scale_model_input(__a , __a)
_UpperCamelCase = t
if not torch.is_tensor(__a):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
_UpperCamelCase = latent_model_input.device.type == '''mps'''
if isinstance(__a , __a):
_UpperCamelCase = torch.floataa if is_mps else torch.floataa
else:
_UpperCamelCase = torch.intaa if is_mps else torch.intaa
_UpperCamelCase = torch.tensor([timesteps] , dtype=__a , device=latent_model_input.device)
elif len(timesteps.shape) == 0:
_UpperCamelCase = timesteps[None].to(latent_model_input.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_UpperCamelCase = timesteps.expand(latent_model_input.shape[0])
# predict noise model_output
_UpperCamelCase = self.transformer(
__a , timestep=__a , class_labels=__a).sample
# perform guidance
if guidance_scale > 1:
_UpperCamelCase , _UpperCamelCase = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
_UpperCamelCase , _UpperCamelCase = torch.split(__a , len(__a) // 2 , dim=0)
_UpperCamelCase = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
_UpperCamelCase = torch.cat([half_eps, half_eps] , dim=0)
_UpperCamelCase = torch.cat([eps, rest] , dim=1)
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
_UpperCamelCase , _UpperCamelCase = torch.split(__a , __a , dim=1)
else:
_UpperCamelCase = noise_pred
# compute previous image: x_t -> x_t-1
_UpperCamelCase = self.scheduler.step(__a , __a , __a).prev_sample
if guidance_scale > 1:
_UpperCamelCase , _UpperCamelCase = latent_model_input.chunk(2 , dim=0)
else:
_UpperCamelCase = latent_model_input
_UpperCamelCase = 1 / self.vae.config.scaling_factor * latents
_UpperCamelCase = self.vae.decode(__a).sample
_UpperCamelCase = (samples / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_UpperCamelCase = samples.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
_UpperCamelCase = self.numpy_to_pil(__a)
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=__a)
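A usage sketch mirroring the diffusers documentation (assumes the `facebook/DiT-XL-2-256` checkpoint and a CUDA device):

```python
import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

class_ids = pipe.get_label_ids(["white shark"])  # label names -> ImageNet class ids
generator = torch.manual_seed(33)
image = pipe(class_labels=class_ids, num_inference_steps=25, generator=generator).images[0]
```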
---
import warnings

from ...utils import logging
from .image_processing_clip import CLIPImageProcessor


logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
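The drop-in replacement the warning points to is a one-line change:

```python
from transformers import CLIPImageProcessor

image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
```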
---
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True):
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True):
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True):
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True):
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator=None,
    ):
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
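# --- Added example (not part of the original file) ---
# Hedged sketch of the horizontal tile blending used by tiled_encode/tiled_decode:
# the left tile's right edge is linearly cross-faded into the right tile over
# `blend_extent` columns, which is what hides the seams between tiles.
#
#   import torch
#   left, right, blend_extent = torch.zeros(1, 1, 4, 8), torch.ones(1, 1, 4, 8), 4
#   for x in range(blend_extent):
#       right[:, :, :, x] = left[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + right[:, :, :, x] * (x / blend_extent)
#   # the first four columns now ramp 0.00, 0.25, 0.50, 0.75 into the untouched 1.0 region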
def is_palindrome_number(num: int) -> bool:
    """
    Return True if ``num`` reads the same forwards and backwards.

    >>> is_palindrome_number(121)
    True
    >>> is_palindrome_number(-121)
    False
    >>> is_palindrome_number(123)
    False
    """
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
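# --- Added quick checks (not part of the original file) ---
# The reversal loop above peels the last decimal digit each iteration:
# 121 -> rev 1 (num 12) -> rev 12 (num 1) -> rev 121 (num 0).
assert is_palindrome_number(0) and is_palindrome_number(7) and is_palindrome_number(1221)
assert not is_palindrome_number(10) and not is_palindrome_number(-121)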
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
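# --- Added example (not part of the original file) ---
# Hedged sketch of the score rule above with random stand-in embeddings (no CLIP
# weights needed): cosine similarity against each concept bank entry minus a
# per-concept threshold; any positive score flags the image.
#
#   import torch
#   import torch.nn as nn
#   image_embeds = torch.randn(2, 768)
#   concept_embeds, thresholds = torch.randn(17, 768), torch.full((17,), 0.2)
#   cos = torch.mm(nn.functional.normalize(image_embeds), nn.functional.normalize(concept_embeds).t())
#   flagged = torch.any(cos - thresholds > 0, dim=1)   # one bool per image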
def binary_multiply(a: int, b: int) -> int:
    """Multiply two non-negative integers with the double-and-add (Russian peasant) method."""
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    """Multiply ``a`` and ``b`` modulo ``modulus`` without overflowing intermediate products."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
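# --- Added quick checks (not part of the original file) ---
# Double-and-add processes one bit of b per iteration, so both functions run in
# O(log b) additions: 19 * 13 = 19 * (8 + 4 + 1) = 152 + 76 + 19 = 247.
assert binary_multiply(19, 13) == 19 * 13 == 247
assert binary_mod_multiply(19, 13, 7) == (19 * 13) % 7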
from __future__ import annotations

import math


def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    # the seven Strassen products
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
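# --- Added verification (not part of the original file) ---
# Cross-check Strassen against a naive triple loop on a small random case.
# Note: strassen() pads its inputs in place, so compute the expected product first.
import random

_a = [[random.randint(0, 9) for _ in range(3)] for _ in range(5)]
_b = [[random.randint(0, 9) for _ in range(4)] for _ in range(3)]
_expected = [[sum(_a[i][k] * _b[k][j] for k in range(3)) for j in range(4)] for i in range(5)]
assert strassen(_a, _b) == _expected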
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_graphormer": ["GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "GraphormerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_graphormer"] = [
'''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GraphormerForGraphClassification''',
'''GraphormerModel''',
'''GraphormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
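# --- Added note (not part of the original file) ---
# Hedged illustration of the lazy-import pattern above: importing the package is
# cheap, and the heavy torch-backed modules load only on first attribute access,
# e.g. `from transformers import GraphormerConfig` does not pull in modeling code,
# while `from transformers import GraphormerModel` triggers the lazy load.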
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name_or_path="bert-base-cased"):
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]
def training_function(config, args):
UpperCamelCase__ :Any = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCamelCase__ :Any = config['''lr''']
UpperCamelCase__ :Optional[int] = int(config['''num_epochs'''] )
UpperCamelCase__ :List[Any] = int(config['''seed'''] )
UpperCamelCase__ :List[Any] = int(config['''batch_size'''] )
UpperCamelCase__ :List[Any] = args.model_name_or_path
set_seed(__a )
UpperCamelCase__ , UpperCamelCase__ :Any = get_dataloaders(__a , __a , __a )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCamelCase__ :Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(__a , return_dict=__a )
# Instantiate optimizer
UpperCamelCase__ :Any = (
AdamW
if accelerator.state.deepspeed_plugin is None
or '''optimizer''' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
UpperCamelCase__ :Optional[Any] = optimizer_cls(params=model.parameters() , lr=__a )
if accelerator.state.deepspeed_plugin is not None:
UpperCamelCase__ :Tuple = accelerator.state.deepspeed_plugin.deepspeed_config[
'''gradient_accumulation_steps'''
]
else:
UpperCamelCase__ :Dict = 1
UpperCamelCase__ :Tuple = (len(__a ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
UpperCamelCase__ :Any = get_linear_schedule_with_warmup(
optimizer=__a , num_warmup_steps=0 , num_training_steps=__a , )
else:
UpperCamelCase__ :Any = DummyScheduler(__a , total_num_steps=__a , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Dict = accelerator.prepare(
__a , __a , __a , __a , __a )
# We need to keep track of how many total steps we have iterated over
UpperCamelCase__ :Tuple = 0
# We also need to keep track of the stating epoch so files are named properly
UpperCamelCase__ :Optional[Any] = 0
UpperCamelCase__ :Optional[int] = evaluate.load('''glue''' , '''mrpc''' )
UpperCamelCase__ :List[Any] = num_epochs
if args.partial_train_epoch is not None:
UpperCamelCase__ :Optional[Any] = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
UpperCamelCase__ :Dict = args.resume_from_checkpoint.split('''epoch_''' )[1]
UpperCamelCase__ :Tuple = ''''''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
UpperCamelCase__ :Any = int(__a ) + 1
UpperCamelCase__ :Dict = evaluation_loop(__a , __a , __a , __a )
accelerator.print('''resumed checkpoint performance:''' , __a )
accelerator.print('''resumed checkpoint\'s scheduler\'s lr:''' , lr_scheduler.get_lr()[0] )
accelerator.print('''resumed optimizers\'s lr:''' , optimizer.param_groups[0]['''lr'''] )
with open(os.path.join(args.output_dir , f'''state_{starting_epoch-1}.json''' ) , '''r''' ) as f:
UpperCamelCase__ :Optional[int] = json.load(__a )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
UpperCamelCase__ :Optional[Any] = {}
for epoch in range(__a , __a ):
model.train()
for step, batch in enumerate(__a ):
UpperCamelCase__ :Optional[int] = model(**__a )
UpperCamelCase__ :Optional[int] = outputs.loss
UpperCamelCase__ :str = loss / gradient_accumulation_steps
accelerator.backward(__a )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
UpperCamelCase__ :Union[str, Any] = f'''epoch_{epoch}'''
UpperCamelCase__ :List[Any] = os.path.join(args.output_dir , __a )
accelerator.save_state(__a )
UpperCamelCase__ :List[Any] = evaluation_loop(__a , __a , __a , __a )
UpperCamelCase__ :int = accuracy
UpperCamelCase__ :List[Any] = lr_scheduler.get_lr()[0]
UpperCamelCase__ :Any = optimizer.param_groups[0]['''lr''']
UpperCamelCase__ :int = epoch
UpperCamelCase__ :Tuple = overall_step
accelerator.print(f'''epoch {epoch}:''' , __a )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f'''state_{epoch}.json''' ) , '''w''' ) as f:
json.dump(__a , __a )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
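# --- Added usage note (not part of the original file) ---
# Hedged example invocations; the flags match the argparse definitions above.
#
#   accelerate launch this_script.py --num_epochs 2 --output_dir ./ckpts
#   accelerate launch this_script.py --resume_from_checkpoint ./ckpts/epoch_0 --partial_train_epoch 2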
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict


class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
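# --- Added example (not part of the original file) ---
# Hedged smoke test mirroring the fixtures above: build a tiny UNet2DModel from an
# init dict and run one denoising forward pass. Assumes `diffusers` is installed.
#
#   import torch
#   from diffusers import UNet2DModel
#
#   model = UNet2DModel(sample_size=32, in_channels=3, out_channels=3, layers_per_block=2,
#                       block_out_channels=(32, 64),
#                       down_block_types=("DownBlock2D", "AttnDownBlock2D"),
#                       up_block_types=("AttnUpBlock2D", "UpBlock2D"))
#   noise = torch.randn(1, 3, 32, 32)
#   out = model(noise, timestep=torch.tensor([10])).sample
#   assert out.shape == noise.shape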
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            attention_mask,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxBertModel,
            FlaxBertForPreTraining,
            FlaxBertForMaskedLM,
            FlaxBertForMultipleChoice,
            FlaxBertForQuestionAnswering,
            FlaxBertForNextSentencePrediction,
            FlaxBertForSequenceClassification,
            FlaxBertForTokenClassification,
            FlaxBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/time-series-transformer-tourism-monthly''': (
'''https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'''
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
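# --- Added example (not part of the original file) ---
# Hedged sketch: the model's input width is input_size * len(lags_sequence) plus the
# extra features counted by _number_of_features (embeddings + real/time features +
# the two scaling statistics). With the defaults and one time feature:
# 1 * 7 + (0 + 0 + 1 + 0 + 2) = 10.
#
#   from transformers import TimeSeriesTransformerConfig
#
#   config = TimeSeriesTransformerConfig(prediction_length=24, num_time_features=1)
#   assert config.feature_size == 10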
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """""",
"""i""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""") , ["""▁he""", """ll""", """o"""])
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""se""",
""".""",
] , )
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
__SCREAMING_SNAKE_CASE = {"""input_ids""": [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__ , model_name="""xlnet-base-cased""" , revision="""c841166438c31ec7ca9a106dee7bb312b73ae511""" , )
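# Editorial note: a minimal sketch of what the integration check above asserts,
# assuming the standard `transformers` API (network access is needed to fetch
# the pinned tokenizer revision):
#
#   from transformers import XLNetTokenizer
#   tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#   enc = tokenizer(["some text", "another text"], padding=True)
#   # enc["input_ids"], enc["token_type_ids"] and enc["attention_mask"] are
#   # compared field by field against the expected_encoding dict above.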
| 100
| 0
|
from math import sqrt
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int ) -> bool:
assert isinstance(lowerCAmelCase , lowerCAmelCase ) and (
number >= 0
), "'number' must been an int and positive"
_UpperCAmelCase : str = True
# 0 and 1 are none primes.
if number <= 1:
_UpperCAmelCase : int = False
for divisor in range(2 , int(round(sqrt(lowerCAmelCase ) ) ) + 1 ):
# if 'number' is divisible by 'divisor', set 'status'
# to False and break out of the loop.
if number % divisor == 0:
_UpperCAmelCase : Optional[int] = False
break
# precondition
assert isinstance(lowerCAmelCase , lowerCAmelCase ), "'status' must been from type bool"
return status
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Tuple ) -> Optional[int]:
assert isinstance(lowerCAmelCase , lowerCAmelCase ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
_UpperCAmelCase : Optional[int] = list(range(2 , n + 1 ) )
_UpperCAmelCase : Any = [] # this list will be returned.
# actual sieve of Eratosthenes
for i in range(len(lowerCAmelCase ) ):
for j in range(i + 1 , len(lowerCAmelCase ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
_UpperCAmelCase : Optional[Any] = 0
# filters actual prime numbers.
_UpperCAmelCase : Tuple = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCAmelCase , lowerCAmelCase ), "'ans' must been from type list"
return ans
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: List[Any] ) -> Optional[int]:
assert isinstance(lowerCAmelCase , lowerCAmelCase ) and (n > 2), "'N' must been an int and > 2"
_UpperCAmelCase : Any = []
# iterates over all numbers from 2 up to n (inclusive);
# if a number is prime, it is appended to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCAmelCase ):
ans.append(lowerCAmelCase )
# precondition
assert isinstance(lowerCAmelCase , lowerCAmelCase ), "'ans' must been from type list"
return ans
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int ) -> int:
assert isinstance(lowerCAmelCase , lowerCAmelCase ) and number >= 0, "'number' must be an int and >= 0"
_UpperCAmelCase : Union[str, Any] = [] # this list will be returned by the function.
# potential prime number factors.
_UpperCAmelCase : str = 2
_UpperCAmelCase : Optional[int] = number
if number == 0 or number == 1:
ans.append(lowerCAmelCase )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCAmelCase ):
while quotient != 1:
if is_prime(lowerCAmelCase ) and (quotient % factor == 0):
ans.append(lowerCAmelCase )
quotient //= factor # floor division keeps 'quotient' an int
else:
factor += 1
else:
ans.append(lowerCAmelCase )
# precondition
assert isinstance(lowerCAmelCase , lowerCAmelCase ), "'ans' must been from type list"
return ans
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Any ) -> str:
assert isinstance(lowerCAmelCase , lowerCAmelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
_UpperCAmelCase : str = 0
# prime factorization of 'number'
_UpperCAmelCase : Any = prime_factorization(lowerCAmelCase )
_UpperCAmelCase : Any = max(lowerCAmelCase )
# precondition
assert isinstance(lowerCAmelCase , lowerCAmelCase ), "'ans' must been from type int"
return ans
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Optional[Any] ) -> List[str]:
assert isinstance(lowerCAmelCase , lowerCAmelCase ) and (
number >= 0
), "'number' bust been an int and >= 0"
_UpperCAmelCase : Optional[Any] = 0
# prime factorization of 'number'
_UpperCAmelCase : Dict = prime_factorization(lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = min(lowerCAmelCase )
# precondition
assert isinstance(lowerCAmelCase , lowerCAmelCase ), "'ans' must been from type int"
return ans
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: List[str] ) -> Any:
assert isinstance(lowerCAmelCase , lowerCAmelCase ), "'number' must been an int"
assert isinstance(number % 2 == 0 , lowerCAmelCase ), "compare bust been from type bool"
return number % 2 == 0
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Tuple ) -> List[str]:
assert isinstance(lowerCAmelCase , lowerCAmelCase ), "'number' must been an int"
assert isinstance(number % 2 != 0 , lowerCAmelCase ), "compare bust been from type bool"
return number % 2 != 0
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Dict ) -> Optional[Any]:
assert (
isinstance(lowerCAmelCase , lowerCAmelCase ) and (number > 2) and is_even(lowerCAmelCase )
), "'number' must been an int, even and > 2"
_UpperCAmelCase : int = [] # this list will be returned
# creates a list of prime numbers between 2 up to 'number'
_UpperCAmelCase : Union[str, Any] = get_prime_numbers(lowerCAmelCase )
_UpperCAmelCase : str = len(lowerCAmelCase )
# run variable for while-loops.
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : Dict = None
# exit flag for breaking out of the loops
_UpperCAmelCase : List[Any] = True
while i < len_pn and loop:
_UpperCAmelCase : Optional[Any] = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
_UpperCAmelCase : Optional[Any] = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCAmelCase , lowerCAmelCase )
and (len(lowerCAmelCase ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int , lowerCAmelCase: Optional[Any] ) -> Optional[Any]:
assert (
isinstance(lowerCAmelCase , lowerCAmelCase )
and isinstance(lowerCAmelCase , lowerCAmelCase )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
_UpperCAmelCase : List[str] = 0
while numbera != 0:
_UpperCAmelCase : Optional[Any] = numbera % numbera
_UpperCAmelCase : List[Any] = numbera
_UpperCAmelCase : Dict = rest
# precondition
assert isinstance(lowerCAmelCase , lowerCAmelCase ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: List[Any] , lowerCAmelCase: Any ) -> Dict:
assert (
isinstance(lowerCAmelCase , lowerCAmelCase )
and isinstance(lowerCAmelCase , lowerCAmelCase )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
_UpperCAmelCase : Any = 1 # actual answer that will be returned.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
_UpperCAmelCase : List[Any] = prime_factorization(lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = prime_factorization(lowerCAmelCase )
elif numbera == 1 or numbera == 1:
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : Dict = max(lowerCAmelCase , lowerCAmelCase )
_UpperCAmelCase : int = 0
_UpperCAmelCase : int = 0
_UpperCAmelCase : Union[str, Any] = [] # numbers already handled in both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
_UpperCAmelCase : Union[str, Any] = prime_fac_a.count(lowerCAmelCase )
_UpperCAmelCase : int = prime_fac_a.count(lowerCAmelCase )
for _ in range(max(lowerCAmelCase , lowerCAmelCase ) ):
ans *= n
else:
_UpperCAmelCase : Dict = prime_fac_a.count(lowerCAmelCase )
for _ in range(lowerCAmelCase ):
ans *= n
done.append(lowerCAmelCase )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
_UpperCAmelCase : Any = prime_fac_a.count(lowerCAmelCase )
for _ in range(lowerCAmelCase ):
ans *= n
done.append(lowerCAmelCase )
# precondition
assert isinstance(lowerCAmelCase , lowerCAmelCase ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int ) -> Optional[Any]:
assert isinstance(lowerCAmelCase , lowerCAmelCase ) and (n >= 0), "'number' must been a positive int"
_UpperCAmelCase : Tuple = 0
_UpperCAmelCase : Union[str, Any] = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans is not prime,
# advance to the next prime number.
while not is_prime(lowerCAmelCase ):
ans += 1
# precondition
assert isinstance(lowerCAmelCase , lowerCAmelCase ) and is_prime(
lowerCAmelCase ), "'ans' must been a prime number and from type int"
return ans
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Union[str, Any] , lowerCAmelCase: Optional[int] ) -> str:
assert (
is_prime(lowerCAmelCase ) and is_prime(lowerCAmelCase ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
_UpperCAmelCase : List[str] = p_number_a + 1 # jump to the next number
_UpperCAmelCase : List[str] = [] # this list will be returned.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCAmelCase ):
number += 1
while number < p_number_a:
ans.append(lowerCAmelCase )
number += 1
# fetch the next prime number.
while not is_prime(lowerCAmelCase ):
number += 1
# precondition
assert (
isinstance(lowerCAmelCase , lowerCAmelCase )
and ans[0] != p_number_a
and ans[len(lowerCAmelCase ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains neither 'pNumber1' nor 'pNumber2'!
return ans
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Any ) -> List[Any]:
assert isinstance(lowerCAmelCase , lowerCAmelCase ) and (n >= 1), "'n' must been int and >= 1"
_UpperCAmelCase : List[str] = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCAmelCase )
# precondition
assert ans[0] == 1 and ans[len(lowerCAmelCase ) - 1] == n, "Error in function getDivisiors(...)"
return ans
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Optional[int] ) -> List[Any]:
assert isinstance(lowerCAmelCase , lowerCAmelCase ) and (
number > 1
), "'number' must been an int and >= 1"
_UpperCAmelCase : Any = get_divisors(lowerCAmelCase )
# precondition
assert (
isinstance(lowerCAmelCase , lowerCAmelCase )
and (divisors[0] == 1)
and (divisors[len(lowerCAmelCase ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Optional[Any] , lowerCAmelCase: Optional[int] ) -> str:
assert (
isinstance(lowerCAmelCase , lowerCAmelCase )
and isinstance(lowerCAmelCase , lowerCAmelCase )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
_UpperCAmelCase : Tuple = gcd(abs(lowerCAmelCase ) , abs(lowerCAmelCase ) )
# precondition
assert (
isinstance(lowerCAmelCase , lowerCAmelCase )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Any ) -> List[str]:
assert isinstance(lowerCAmelCase , lowerCAmelCase ) and (n >= 0), "'n' must been a int and >= 0"
_UpperCAmelCase : int = 1 # this will be returned.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: List[str] ) -> List[Any]:
assert isinstance(lowerCAmelCase , lowerCAmelCase ) and (n >= 0), "'n' must been an int and >= 0"
_UpperCAmelCase : List[Any] = 0
_UpperCAmelCase : Union[str, Any] = 1
_UpperCAmelCase : Dict = 1 # this will be returned
for _ in range(n - 1 ):
_UpperCAmelCase : Optional[int] = ans
ans += fiba
_UpperCAmelCase : Optional[Any] = tmp
return ans
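# Editorial usage sketch. The obfuscated names above correspond to the classic
# primelib helpers; is_prime, get_prime_numbers, prime_factorization, gcd,
# get_divisors and is_even are visible in the call sites, the remaining name
# mapping is assumed.
if __name__ == "__main__":
    assert is_prime(97) and not is_prime(100)
    assert get_prime_numbers(20) == [2, 3, 5, 7, 11, 13, 17, 19]
    assert prime_factorization(360) == [2, 2, 2, 3, 3, 5]
    assert gcd(48, 36) == 12
    assert get_divisors(28) == [1, 2, 4, 7, 14, 28]
    assert is_even(10) and not is_even(7)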
| 189
|
from __future__ import annotations
class a :
def __init__( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = order
# a_{0} ... a_{k}
_UpperCAmelCase : Tuple = [1.0] + [0.0] * order
# b_{0} ... b_{k}
_UpperCAmelCase : int = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
_UpperCAmelCase : Optional[Any] = [0.0] * self.order
# y[n-1] ... y[n-k]
_UpperCAmelCase : Dict = [0.0] * self.order
def _UpperCAmelCase ( self , A_ , A_ ):
'''simple docstring'''
if len(A_ ) < self.order:
_UpperCAmelCase : List[str] = [1.0, *a_coeffs]
if len(A_ ) != self.order + 1:
_UpperCAmelCase : List[Any] = (
f'Expected a_coeffs to have {self.order + 1} elements '
f'for {self.order}-order filter, got {len(A_ )}'
)
raise ValueError(A_ )
if len(A_ ) != self.order + 1:
_UpperCAmelCase : int = (
f'Expected b_coeffs to have {self.order + 1} elements '
f'for {self.order}-order filter, got {len(A_ )}'
)
raise ValueError(A_ )
_UpperCAmelCase : Optional[Any] = a_coeffs
_UpperCAmelCase : Union[str, Any] = b_coeffs
def _UpperCAmelCase ( self , A_ ):
'''simple docstring'''
_UpperCAmelCase : Tuple = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
_UpperCAmelCase : Dict = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
_UpperCAmelCase : Optional[Any] = self.input_history[:-1]
_UpperCAmelCase : Optional[int] = self.output_history[:-1]
_UpperCAmelCase : Optional[Any] = sample
_UpperCAmelCase : str = result
return result
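# Editorial usage sketch. The class presumably deobfuscates to IIRFilter with
# set_coefficients() and process(); it realises the difference equation
#   y[n] = (b0*x[n] + sum_i(b_i*x[n-i]) - sum_i(a_i*y[n-i])) / a0
# The biquad coefficients below are illustrative values, not a designed filter.
if __name__ == "__main__":
    filt = IIRFilter(2)
    filt.set_coefficients([1.0, -1.1430, 0.4128], [0.0675, 0.1349, 0.0675])
    out = [filt.process(x) for x in (0.0, 1.0, 0.5, -0.5)]
    print(out)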
| 189
| 1
|
"""simple docstring"""
def _snake_case ( _snake_case : int , _snake_case : int ) -> int:
'''simple docstring'''
while b:
_A , _A = b, a % b
return a
def _snake_case ( _snake_case : int , _snake_case : int ) -> int:
'''simple docstring'''
return a if b == 0 else euclidean_gcd_recursive(_snake_case , a % b )
def _snake_case ( ) -> Dict:
'''simple docstring'''
print(F'''euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}''' )
print(F'''euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}''' )
print(F'''euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}''' )
print(F'''euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}''' )
print(F'''euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}''' )
print(F'''euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}''' )
print(F'''euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}''' )
print(F'''euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}''' )
print(F'''euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}''' )
if __name__ == "__main__":
main()
| 315
|
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
a = logging.getLogger(__name__)
a = 50 # max width of layer names
a = 70 # max width of quantizer names
def _snake_case ( _snake_case : int ) -> List[Any]:
'''simple docstring'''
_A = parser.add_argument_group('quant_trainer arguments' )
group.add_argument('--wprec' , type=_snake_case , default=8 , help='weight precision' )
group.add_argument('--aprec' , type=_snake_case , default=8 , help='activation precision' )
group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
group.add_argument('--quant-disable-keyword' , type=_snake_case , nargs='+' , help='disable quantizers by keyword' )
group.add_argument('--quant-disable-layer-module' , type=_snake_case , help='disable quantizers by keyword under layer.' )
group.add_argument('--quant-enable-layer-module' , type=_snake_case , help='enable quantizers by keyword under layer' )
group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
group.add_argument('--percentile' , default=_snake_case , type=_snake_case , help='percentile for PercentileCalibrator' )
group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
group.add_argument('--clip-gelu' , metavar='N' , type=_snake_case , help='clip gelu output maximum value to N' )
group.add_argument(
'--recalibrate-weights' , action='store_true' , help=(
'recalibrate weight amaxes by taking the max of the weights.'
' amaxes will be computed with the current quantization granularity (axis).'
) , )
def _snake_case ( _snake_case : Dict ) -> Optional[Any]:
'''simple docstring'''
if args.calibrator == "max":
_A = 'max'
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('Specify --percentile when using percentile calibrator' )
_A = 'histogram'
elif args.calibrator == "mse":
_A = 'histogram'
else:
raise ValueError(F'''Invalid calibrator {args.calibrator}''' )
_A = QuantDescriptor(num_bits=args.aprec , calib_method=_snake_case )
_A = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(_snake_case )
quant_nn.QuantLinear.set_default_quant_desc_weight(_snake_case )
def _snake_case ( _snake_case : Union[str, Any] , _snake_case : List[Any] , _snake_case : Any=False , _snake_case : Union[str, Any]=False ) -> Optional[int]:
'''simple docstring'''
logger.info('Configuring Model for Quantization' )
logger.info(F'''using quantization package {pytorch_quantization.__file__}''' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(_snake_case , ['embeddings'] , which='weight' , _disabled=_snake_case )
if args.quant_disable:
set_quantizer_by_name(_snake_case , [''] , _disabled=_snake_case )
if args.quant_disable_keyword:
set_quantizer_by_name(_snake_case , args.quant_disable_keyword , _disabled=_snake_case )
if args.quant_disable_layer_module:
set_quantizer_by_name(_snake_case , [R'layer.\d+.' + args.quant_disable_layer_module] , _disabled=_snake_case )
if args.quant_enable_layer_module:
set_quantizer_by_name(_snake_case , [R'layer.\d+.' + args.quant_enable_layer_module] , _disabled=_snake_case )
if args.recalibrate_weights:
recalibrate_weights(_snake_case )
if args.fuse_qkv:
fuse_qkv(_snake_case , _snake_case )
if args.clip_gelu:
clip_gelu(_snake_case , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(_snake_case )
def _snake_case ( _snake_case : str ) -> Any:
'''simple docstring'''
logger.info('Enabling Calibration' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F'''{name:80}: {module}''' )
def _snake_case ( _snake_case : List[Any] , _snake_case : List[Any] ) -> str:
'''simple docstring'''
logger.info('Loading calibrated amax' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('percentile' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(_snake_case )
def _snake_case ( _snake_case : str , _snake_case : int ) -> str:
'''simple docstring'''
def fusea(_snake_case : int , _snake_case : str , _snake_case : Optional[Any] ):
for mod in [qq, qk, qv]:
if not hasattr(_snake_case , '_amax' ):
print(' WARNING: NO AMAX BUFFER' )
return
_A = qq._amax.detach().item()
_A = qk._amax.detach().item()
_A = qv._amax.detach().item()
_A = max(_snake_case , _snake_case , _snake_case )
qq._amax.fill_(_snake_case )
qk._amax.fill_(_snake_case )
qv._amax.fill_(_snake_case )
logger.info(F''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith('.attention.self' ):
logger.info(F'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _snake_case ( _snake_case : int , _snake_case : str ) -> Union[str, Any]:
'''simple docstring'''
for name, mod in model.named_modules():
if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ):
_A = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=_snake_case )
_A = mod._input_quantizer._amax.data.detach().item()
logger.info(F'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def _snake_case ( _snake_case : List[str] ) -> List[str]:
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(_snake_case , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None:
_A = mod.weight.shape[0]
_A = mod._weight_quantizer._amax.detach()
_A = torch.ones(_snake_case , dtype=amax.dtype , device=amax.device ) * amax
print(F'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def _snake_case ( _snake_case : Dict ) -> Tuple:
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(_snake_case , '_weight_quantizer' ):
if not hasattr(mod._weight_quantizer , '_amax' ):
print(F'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
_A = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
_A = set(range(len(mod.weight.size() ) ) ) - axis_set
_A = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_snake_case , keepdims=_snake_case ).detach()
logger.info(F'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
_A = amax
def _snake_case ( _snake_case : Tuple , _snake_case : List[str]=25 , _snake_case : str=1_80 , _snake_case : int=None ) -> List[Any]:
'''simple docstring'''
if ignore is None:
_A = []
elif not isinstance(_snake_case , _snake_case ):
_A = [ignore]
_A = 0
for name, mod in model.named_modules():
if not hasattr(_snake_case , 'weight' ):
continue
_A = max(_snake_case , len(_snake_case ) )
for name, mod in model.named_modules():
_A = getattr(_snake_case , '_input_quantizer' , _snake_case )
_A = getattr(_snake_case , '_weight_quantizer' , _snake_case )
if not hasattr(_snake_case , 'weight' ):
continue
if type(_snake_case ) in ignore:
continue
if [True for s in ignore if type(_snake_case ) is str and s in name]:
continue
_A = F'''Act:{input_q.extra_repr()}'''
_A = F'''Wgt:{weight_q.extra_repr()}'''
_A = F'''{name:{name_width}} {act_str} {wgt_str}'''
if len(_snake_case ) <= line_width:
logger.info(_snake_case )
else:
logger.info(F'''{name:{name_width}} {act_str}''' )
logger.info(F'''{" ":{name_width}} {wgt_str}''' )
def _snake_case ( _snake_case : Dict ) -> int:
'''simple docstring'''
_A = 0
for name, mod in model.named_modules():
if isinstance(_snake_case , pytorch_quantization.nn.TensorQuantizer ):
print(F'''{name:80} {mod}''' )
count += 1
print(F'''{count} TensorQuantizers found in model''' )
def _snake_case ( _snake_case : str , _snake_case : Dict , _snake_case : List[Any] , _snake_case : Union[str, Any] , _snake_case : Any ) -> int:
'''simple docstring'''
_A = getattr(_snake_case , _snake_case , _snake_case )
if quantizer_mod is not None:
assert hasattr(_snake_case , _snake_case )
setattr(_snake_case , _snake_case , _snake_case )
else:
logger.warning(F'''{name} has no {quantizer}''' )
def _snake_case ( _snake_case : Dict , _snake_case : Optional[int] , _snake_case : str="both" , **_snake_case : List[Any] ) -> str:
'''simple docstring'''
_A = F'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += F''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(_snake_case , _snake_case , '_input_quantizer' , _snake_case , _snake_case )
if which in ["weight", "both"]:
set_quantizer(_snake_case , _snake_case , '_weight_quantizer' , _snake_case , _snake_case )
logger.info(_snake_case )
def _snake_case ( _snake_case : Any , _snake_case : int , **_snake_case : Dict ) -> List[str]:
'''simple docstring'''
for name, mod in model.named_modules():
if hasattr(_snake_case , '_input_quantizer' ) or hasattr(_snake_case , '_weight_quantizer' ):
for n in names:
if re.search(_snake_case , _snake_case ):
set_quantizers(_snake_case , _snake_case , **_snake_case )
elif name.endswith('_quantizer' ):
for n in names:
if re.search(_snake_case , _snake_case ):
_A = F'''Warning: changing {name:{name_width}}'''
for k, v in kwargs.items():
s += F''' {k}={v}'''
setattr(_snake_case , _snake_case , _snake_case )
logger.info(_snake_case )
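# Editorial usage sketch. The module-level helpers above deobfuscate to the
# quant_trainer API (add_arguments, set_default_quantizers, configure_model,
# enable_calibration, finish_calibration); the flow below assumes those names
# and a working pytorch_quantization install.
#
#   import argparse
#   parser = argparse.ArgumentParser()
#   add_arguments(parser)
#   args = parser.parse_args(["--aprec", "8", "--wprec", "8", "--calibrator", "max"])
#   set_default_quantizers(args)
#   configure_model(model, args, calib=True)
#   enable_calibration(model)
#   # ... run a handful of forward passes on calibration batches ...
#   finish_calibration(model, args)
#   configure_model(model, args, calib=False)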
| 315
| 1
|
"""simple docstring"""
def _snake_case ( _snake_case : int = 1_00 ) -> int:
'''simple docstring'''
_A = n * (n + 1) * (2 * n + 1) / 6
_A = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'''{solution() = }''')
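# Editorial worked check: for n = 10 the closed forms give
# sum_of_squares = 10*11*21/6 = 385 and square_of_sum = (10*11/2)**2 = 3025,
# so the difference is 3025 - 385 = 2640, matching the brute force below.
assert sum(range(1, 11)) ** 2 - sum(i * i for i in range(1, 11)) == 2640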
| 271
|
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
a = [8, 5, 9, 7]
a = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
a = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class lowercase_ :
'''simple docstring'''
def __init__( self : str , _UpperCAmelCase : list[int] , _UpperCAmelCase : list[list[int]] , _UpperCAmelCase : list[list[int]] , ):
_A = claim_vector
_A = allocated_resources_table
_A = maximum_claim_table
def lowerCAmelCase_ ( self : Tuple ):
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def lowerCAmelCase_ ( self : Tuple ):
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def lowerCAmelCase_ ( self : List[Any] ):
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(_UpperCAmelCase ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def lowerCAmelCase_ ( self : List[Any] ):
return {self.__need().index(_UpperCAmelCase ): i for i in self.__need()}
def lowerCAmelCase_ ( self : List[str] , **_UpperCAmelCase : int ):
_A = self.__need()
_A = self.__allocated_resources_table
_A = self.__available_resources()
_A = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 50 + '\n' )
while need_list:
_A = False
for each_need in need_list:
_A = True
for index, need in enumerate(_UpperCAmelCase ):
if need > available_resources[index]:
_A = False
break
if execution:
_A = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
_A = original_need_index
print(F'''Process {process_number + 1} is executing.''' )
# remove the process run from stack
need_list.remove(_UpperCAmelCase )
# update available/freed resources stack
_A = np.array(_UpperCAmelCase ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
+ ' '.join([str(_UpperCAmelCase ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def lowerCAmelCase_ ( self : Union[str, Any] ):
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
F'''P{self.__allocated_resources_table.index(_UpperCAmelCase ) + 1}'''
+ ' '.join(F'''{it:>8}''' for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
F'''P{self.__maximum_claim_table.index(_UpperCAmelCase ) + 1}'''
+ ' '.join(F'''{it:>8}''' for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
+ ' '.join(str(_UpperCAmelCase ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
+ ' '.join(str(_UpperCAmelCase ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
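# Editorial usage sketch, assuming the class deobfuscates to BankersAlgorithm
# and the globals above to claim_vector / allocated_resources_table /
# maximum_claim_table:
#
#   BankersAlgorithm(
#       claim_vector, allocated_resources_table, maximum_claim_table
#   ).main(describe=True)
#
# This prints both resource tables, then executes processes in a safe order
# (or reports an unsafe state).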
| 271
| 1
|
from __future__ import annotations
def _lowerCAmelCase (_lowerCAmelCase):
UpperCamelCase_ = len(__UpperCamelCase)
# We need to create solution object to save path.
UpperCamelCase_ = [[0 for _ in range(__UpperCamelCase)] for _ in range(__UpperCamelCase)]
UpperCamelCase_ = run_maze(__UpperCamelCase , 0 , 0 , __UpperCamelCase)
if solved:
print("\n".join(str(__UpperCamelCase) for row in solutions))
else:
print("No solution exists!")
return solved
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase):
UpperCamelCase_ = len(__UpperCamelCase)
# Final check point.
if i == j == (size - 1):
UpperCamelCase_ = 1
return True
UpperCamelCase_ = (not i < 0) and (not j < 0) # Check lower bounds
UpperCamelCase_ = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
UpperCamelCase_ = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
UpperCamelCase_ = 1
# check for directions
if (
run_maze(__UpperCamelCase , i + 1 , __UpperCamelCase , __UpperCamelCase)
or run_maze(__UpperCamelCase , __UpperCamelCase , j + 1 , __UpperCamelCase)
or run_maze(__UpperCamelCase , i - 1 , __UpperCamelCase , __UpperCamelCase)
or run_maze(__UpperCamelCase , __UpperCamelCase , j - 1 , __UpperCamelCase)
):
return True
UpperCamelCase_ = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
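# Editorial usage sketch; the driver presumably deobfuscates to solve_maze,
# with run_maze as the recursive helper called above (0 = free, 1 = blocked):
#
#   maze = [
#       [0, 1, 0],
#       [0, 0, 0],
#       [1, 1, 0],
#   ]
#   solve_maze(maze)  # prints the 0/1 path matrix from (0, 0) to (2, 2)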
| 128
|
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__lowerCamelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name
class __snake_case ( lowerCamelCase_ ):
def __init__( self : Dict , _lowercase : CLIPSegForImageSegmentation , _lowercase : CLIPSegProcessor , _lowercase : AutoencoderKL , _lowercase : CLIPTextModel , _lowercase : CLIPTokenizer , _lowercase : UNetaDConditionModel , _lowercase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _lowercase : StableDiffusionSafetyChecker , _lowercase : CLIPImageProcessor , ):
"""simple docstring"""
super().__init__()
if hasattr(scheduler.config , """steps_offset""" ) and scheduler.config.steps_offset != 1:
SCREAMING_SNAKE_CASE__ = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
"""to update the config accordingly as leaving `steps_offset` might led to incorrect results"""
""" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"""
""" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"""
""" file"""
)
deprecate("""steps_offset!=1""" , """1.0.0""" , _lowercase , standard_warn=_lowercase )
SCREAMING_SNAKE_CASE__ = dict(scheduler.config )
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = FrozenDict(_lowercase )
if hasattr(scheduler.config , """skip_prk_steps""" ) and scheduler.config.skip_prk_steps is False:
SCREAMING_SNAKE_CASE__ = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
""" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"""
""" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"""
""" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"""
""" Hub, it would be very nice if you could open a Pull request for the"""
""" `scheduler/scheduler_config.json` file"""
)
deprecate("""skip_prk_steps not set""" , """1.0.0""" , _lowercase , standard_warn=_lowercase )
SCREAMING_SNAKE_CASE__ = dict(scheduler.config )
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = FrozenDict(_lowercase )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
""" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
""" results in services or applications open to the public. Both the diffusers team and Hugging Face"""
""" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
""" it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
""" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
self.register_modules(
segmentation_model=_lowercase , segmentation_processor=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , unet=_lowercase , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , )
def __a ( self : List[Any] , _lowercase : Optional[Union[str, int]] = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowercase )
def __a ( self : Any ):
"""simple docstring"""
self.enable_attention_slicing(_lowercase )
def __a ( self : Optional[int] ):
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
SCREAMING_SNAKE_CASE__ = torch.device("""cuda""" )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __a ( self : Optional[int] ):
"""simple docstring"""
if self.device != torch.device("""meta""" ) or not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Optional[Any] , _lowercase : Union[str, List[str]] , _lowercase : Union[torch.FloatTensor, PIL.Image.Image] , _lowercase : str , _lowercase : int = 5_12 , _lowercase : int = 5_12 , _lowercase : int = 50 , _lowercase : float = 7.5 , _lowercase : Optional[Union[str, List[str]]] = None , _lowercase : Optional[int] = 1 , _lowercase : float = 0.0 , _lowercase : Optional[torch.Generator] = None , _lowercase : Optional[torch.FloatTensor] = None , _lowercase : Optional[str] = "pil" , _lowercase : bool = True , _lowercase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _lowercase : int = 1 , **_lowercase : str , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.segmentation_processor(
text=[text] , images=[image] , padding="""max_length""" , return_tensors="""pt""" ).to(self.device )
SCREAMING_SNAKE_CASE__ = self.segmentation_model(**_lowercase )
SCREAMING_SNAKE_CASE__ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
SCREAMING_SNAKE_CASE__ = self.numpy_to_pil(_lowercase )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
SCREAMING_SNAKE_CASE__ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_lowercase , image=_lowercase , mask_image=_lowercase , height=_lowercase , width=_lowercase , num_inference_steps=_lowercase , guidance_scale=_lowercase , negative_prompt=_lowercase , num_images_per_prompt=_lowercase , eta=_lowercase , generator=_lowercase , latents=_lowercase , output_type=_lowercase , return_dict=_lowercase , callback=_lowercase , callback_steps=_lowercase , )
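# Editorial usage sketch. This community pipeline predicts a mask with CLIPSeg
# and hands it to Stable Diffusion inpainting. The checkpoint names and the
# custom_pipeline id below are assumptions, not guaranteed by this file:
#
#   from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   seg_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting",
#       custom_pipeline="text_inpainting",
#       segmentation_model=seg_model,
#       segmentation_processor=processor,
#   )
#   result = pipe(prompt="a red sofa", image=init_image, text="the couch")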
| 219
| 0
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase = '''src/diffusers'''
UpperCamelCase = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
UpperCamelCase = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCamelCase = spec.loader.load_module()
def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any]):
return line.startswith(_lowerCamelCase) or len(_lowerCamelCase) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$" , _lowerCamelCase) is not None
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
lowercase__ : Tuple = object_name.split(".")
lowercase__ : int = 0
# First let's find the module where our object lives.
lowercase__ : List[Any] = parts[i]
while i < len(_lowerCamelCase) and not os.path.isfile(os.path.join(_lowerCamelCase , f'''{module}.py''')):
i += 1
if i < len(_lowerCamelCase):
lowercase__ : Optional[int] = os.path.join(_lowerCamelCase , parts[i])
if i >= len(_lowerCamelCase):
raise ValueError(f'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''')
with open(os.path.join(_lowerCamelCase , f'''{module}.py''') , "r" , encoding="utf-8" , newline="\n") as f:
lowercase__ : str = f.readlines()
# Now let's find the class / func in the code!
lowercase__ : Dict = ""
lowercase__ : Optional[int] = 0
for name in parts[i + 1 :]:
while (
line_index < len(_lowerCamelCase) and re.search(Rf'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index]) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(_lowerCamelCase):
raise ValueError(f''' {object_name} does not match any function or class in {module}.''')
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
lowercase__ : int = line_index
while line_index < len(_lowerCamelCase) and _should_continue(lines[line_index] , _lowerCamelCase):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1]) <= 1:
line_index -= 1
lowercase__ : str = lines[start_index:line_index]
return "".join(_lowerCamelCase)
UpperCamelCase = re.compile(R'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
UpperCamelCase = re.compile(R'''^\s*(\S+)->(\S+)(\s+.*|$)''')
UpperCamelCase = re.compile(R'''<FILL\s+[^>]*>''')
def lowercase_ ( _lowerCamelCase : Tuple):
lowercase__ : List[str] = code.split("\n")
lowercase__ : Any = 0
while idx < len(_lowerCamelCase) and len(lines[idx]) == 0:
idx += 1
if idx < len(_lowerCamelCase):
return re.search(R"^(\s*)\S" , lines[idx]).groups()[0]
return ""
def lowercase_ ( _lowerCamelCase : Tuple):
lowercase__ : Union[str, Any] = len(get_indent(_lowerCamelCase)) > 0
if has_indent:
lowercase__ : List[str] = f'''class Bla:\n{code}'''
lowercase__ : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=_lowerCamelCase)
lowercase__ : Tuple = black.format_str(_lowerCamelCase , mode=_lowerCamelCase)
lowercase__ , lowercase__ : Dict = style_docstrings_in_code(_lowerCamelCase)
return result[len("class Bla:\n") :] if has_indent else result
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any]=False):
with open(_lowerCamelCase , "r" , encoding="utf-8" , newline="\n") as f:
lowercase__ : Optional[int] = f.readlines()
lowercase__ : Optional[Any] = []
lowercase__ : int = 0
# Not a for loop because `lines` is going to change (if `overwrite=True`).
while line_index < len(_lowerCamelCase):
lowercase__ : Dict = _re_copy_warning.search(lines[line_index])
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
lowercase__ , lowercase__ , lowercase__ : Any = search.groups()
lowercase__ : Dict = find_code_in_diffusers(_lowerCamelCase)
lowercase__ : Optional[Any] = get_indent(_lowerCamelCase)
lowercase__ : List[Any] = line_index + 1 if indent == theoretical_indent else line_index + 2
lowercase__ : List[str] = theoretical_indent
lowercase__ : Union[str, Any] = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
lowercase__ : str = True
while line_index < len(_lowerCamelCase) and should_continue:
line_index += 1
if line_index >= len(_lowerCamelCase):
break
lowercase__ : Dict = lines[line_index]
lowercase__ : List[str] = _should_continue(_lowerCamelCase , _lowerCamelCase) and re.search(f'''^{indent}# End copy''' , _lowerCamelCase) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1]) <= 1:
line_index -= 1
lowercase__ : Any = lines[start_index:line_index]
lowercase__ : List[Any] = "".join(_lowerCamelCase)
# Remove any nested `Copied from` comments to avoid circular copies
lowercase__ : List[Any] = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(_lowerCamelCase) is None]
lowercase__ : Optional[Any] = "\n".join(_lowerCamelCase)
# Before comparing, use the `replace_pattern` on the original code.
if len(_lowerCamelCase) > 0:
lowercase__ : Dict = replace_pattern.replace("with" , "").split(",")
lowercase__ : Any = [_re_replace_pattern.search(_lowerCamelCase) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
lowercase__ , lowercase__ , lowercase__ : int = pattern.groups()
lowercase__ : List[str] = re.sub(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
if option.strip() == "all-casing":
lowercase__ : Optional[Any] = re.sub(obja.lower() , obja.lower() , _lowerCamelCase)
lowercase__ : int = re.sub(obja.upper() , obja.upper() , _lowerCamelCase)
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
lowercase__ : Dict = blackify(lines[start_index - 1] + theoretical_code)
lowercase__ : Tuple = theoretical_code[len(lines[start_index - 1]) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index])
if overwrite:
lowercase__ : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:]
lowercase__ : Optional[int] = start_index + 1
if overwrite and len(_lowerCamelCase) > 0:
# Warn the user a file has been modified.
print(f'''Detected changes, rewriting {filename}.''')
with open(_lowerCamelCase , "w" , encoding="utf-8" , newline="\n") as f:
f.writelines(_lowerCamelCase)
return diffs
def lowercase_ ( _lowerCamelCase : bool = False):
lowercase__ : Optional[Any] = glob.glob(os.path.join(_lowerCamelCase , "**/*.py") , recursive=_lowerCamelCase)
lowercase__ : str = []
for filename in all_files:
lowercase__ : List[str] = is_copy_consistent(_lowerCamelCase , _lowerCamelCase)
diffs += [f'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs]
if not overwrite and len(_lowerCamelCase) > 0:
lowercase__ : Tuple = "\n".join(_lowerCamelCase)
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
UpperCamelCase = parser.parse_args()
check_copies(args.fix_and_overwrite)
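# Editorial illustration of the convention this script enforces. A copied
# block is marked with a comment matched by _re_copy_warning, e.g.:
#
#   # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.scale_model_input
#   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
#
# The body under such a comment must stay identical to the referenced object
# after applying any `Old->New` replacement patterns; running this script with
# --fix_and_overwrite rewrites copies that have drifted.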
| 333
|
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class snake_case_ ( __A ,__A ,__A ,unittest.TestCase ):
__A : int = StableUnCLIPPipeline
__A : int = TEXT_TO_IMAGE_PARAMS
__A : Any = TEXT_TO_IMAGE_BATCH_PARAMS
__A : int = TEXT_TO_IMAGE_IMAGE_PARAMS
__A : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__A : int = False
def __UpperCamelCase ( self : Optional[int] ) -> List[str]:
lowercase__ : str = 32
lowercase__ : Any = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowercase__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase__ : List[str] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=lowercase_ , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
lowercase__ : Any = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowercase_ , num_layers=1 , )
torch.manual_seed(0 )
lowercase__ : Union[str, Any] = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=lowercase_ , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
lowercase__ : List[str] = StableUnCLIPImageNormalizer(embedding_dim=lowercase_ )
lowercase__ : Tuple = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowercase__ : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowercase__ : Tuple = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
lowercase__ : str = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase_ , layers_per_block=1 , upcast_attention=lowercase_ , use_linear_projection=lowercase_ , )
torch.manual_seed(0 )
lowercase__ : Any = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="v_prediction" , set_alpha_to_one=lowercase_ , steps_offset=1 , )
torch.manual_seed(0 )
lowercase__ : List[str] = AutoencoderKL()
lowercase__ : List[Any] = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def __UpperCamelCase ( self : Any , lowercase_ : Tuple , lowercase_ : Dict=0 ) -> Any:
if str(lowercase_ ).startswith("mps" ):
lowercase__ : Any = torch.manual_seed(lowercase_ )
else:
lowercase__ : Any = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowercase__ : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
lowercase__ : Union[str, Any] = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=lowercase_ )
def __UpperCamelCase ( self : List[Any] ) -> List[str]:
lowercase__ : str = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowercase_ )
@slow
@require_torch_gpu
class snake_case_ ( unittest.TestCase ):
def __UpperCamelCase ( self : Tuple ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase ( self : int ) -> int:
lowercase__ : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
lowercase__ : List[str] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 )
lowercase__ : Dict = pipe("anime turle" , generator=lowercase_ , output_type="np" )
lowercase__ : Optional[int] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ )
def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__ : Union[str, Any] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
lowercase__ : int = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase__ : str = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
lowercase__ : Any = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 333
| 1
|
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowerCamelCase_ = '''.'''
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
lowerCamelCase_ = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def __lowercase ( __lowercase , __lowercase , __lowercase ) -> List[Any]:
'''simple docstring'''
_A = SavedModel()
_A = []
with open(os.path.join(__lowercase , "utils" , "tf_ops" , "onnx.json" ) ) as f:
_A = json.load(__lowercase )["opsets"]
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(__lowercase )] )
with open(__lowercase , "rb" ) as f:
saved_model.ParseFromString(f.read() )
_A = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
_A = sorted(__lowercase )
_A = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(__lowercase )
if strict and len(__lowercase ) > 0:
raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + "\n".join(incompatible_ops) )
elif len(__lowercase ) > 0:
print(F'''Found the following incompatible ops for the opset {opset}:''' )
print(*__lowercase , sep="\n" )
else:
print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
lowerCamelCase_ = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
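# Editorial usage note. Typical invocation from the repo root (the script
# filename is an assumption):
#
#   python utils/check_tf_ops.py --saved_model_path model/saved_model.pb --opset 12 --strict
#
# Every graph op that is neither in the ONNX opset table nor in INTERNAL_OPS
# is reported; --strict raises instead of printing a warning.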
| 79
|
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
A__: str = logging.get_logger(__name__)
def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    # Recurse if needed
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_4bit = False
        is_8bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""" )
return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
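

# ---------------------------------------------------------------------------
# Illustration only (not library code): a minimal, self-contained sketch of
# the recursive walk/skip/replace pattern used by `_replace_with_bnb_linear`
# above. `QuantLinearStub` is a hypothetical stand-in for the bitsandbytes
# layers, so the idea can be tried without a GPU or bitsandbytes installed.
# ---------------------------------------------------------------------------
def _demo_replace_linear_sketch():
    import torch.nn as nn

    class QuantLinearStub(nn.Linear):
        """Stand-in for bnb.nn.Linear8bitLt / bnb.nn.Linear4bit."""

    def replace(model, modules_to_not_convert=("lm_head",), prefix=""):
        for name, child in model.named_children():
            full_name = f"{prefix}.{name}" if prefix else name
            if isinstance(child, nn.Linear) and not any(key in full_name for key in modules_to_not_convert):
                model._modules[name] = QuantLinearStub(
                    child.in_features, child.out_features, bias=child.bias is not None
                )
            else:
                replace(child, modules_to_not_convert, full_name)
        return model

    demo = nn.ModuleDict({"body": nn.Linear(4, 8), "lm_head": nn.Linear(8, 2)})
    return replace(demo)  # "body" is swapped for QuantLinearStub, "lm_head" is kept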
| 276
| 0
|
"""simple docstring"""
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    # Reverse our cipher mappings
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
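    # Example round trip (relies on the key-dedup/offset logic above):
    #     cipher_map = create_cipher_map("SECRET")
    #     encoded = encipher("HELLO WORLD", cipher_map)  # letters substituted, spaces kept
    #     decipher(encoded, cipher_map)                  # -> "HELLO WORLD"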
| 212
|
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
_A = open # noqa: we just need to have a builtin inside this module to test it properly
| 212
| 1
|
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        # Pick the first unfinished process; fast-forward if it has not arrived yet.
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[i]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time


if __name__ == "__main__":
    no_of_process = 5
    process_name = ["A", "B", "C", "D", "E"]
    arrival_time = [1, 2, 3, 4, 5]
    burst_time = [1, 2, 3, 4, 5]

    turn_around_time = calculate_turn_around_time(
        process_name, arrival_time, burst_time, no_of_process
    )
    waiting_time = calculate_waiting_time(
        process_name, turn_around_time, burst_time, no_of_process
    )
print('''Process name \tArrival time \tBurst time \tTurn around time \tWaiting time''')
for i in range(0, no_of_process):
print(
F"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t"""
F"""{turn_around_time[i]}\t\t\t{waiting_time[i]}"""
)
print(F"""average waiting time : {mean(waiting_time):.5f}""")
print(F"""average turn around time : {mean(turn_around_time):.5f}""")
| 189
|
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase="shi-labs/oneformer_demo" ) -> Tuple:
with open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type="dataset" ) , "r" ) as f:
UpperCamelCase__ : Optional[Any] = json.load(__lowerCAmelCase )
UpperCamelCase__ : str = {}
UpperCamelCase__ : List[Any] = []
UpperCamelCase__ : int = []
for key, info in class_info.items():
UpperCamelCase__ : List[str] = info["name"]
class_names.append(info["name"] )
if info["isthing"]:
thing_ids.append(int(__lowerCAmelCase ) )
UpperCamelCase__ : Dict = thing_ids
UpperCamelCase__ : Optional[int] = class_names
return metadata
class __a ( unittest.TestCase ):
def __init__( self : Tuple , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : List[str]=7 , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : Tuple=30 , SCREAMING_SNAKE_CASE : Dict=4_00 , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : Union[str, Any]=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Optional[int]=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE : List[str]=[0.5, 0.5, 0.5] , SCREAMING_SNAKE_CASE : Optional[Any]=10 , SCREAMING_SNAKE_CASE : int=False , SCREAMING_SNAKE_CASE : int=2_55 , SCREAMING_SNAKE_CASE : str="shi-labs/oneformer_demo" , SCREAMING_SNAKE_CASE : List[Any]="ade20k_panoptic.json" , SCREAMING_SNAKE_CASE : Tuple=10 , ):
'''simple docstring'''
UpperCamelCase__ : Tuple = parent
UpperCamelCase__ : Optional[int] = batch_size
UpperCamelCase__ : Any = num_channels
UpperCamelCase__ : Optional[int] = min_resolution
UpperCamelCase__ : Union[str, Any] = max_resolution
UpperCamelCase__ : Optional[int] = do_resize
UpperCamelCase__ : List[Any] = {"shortest_edge": 32, "longest_edge": 13_33} if size is None else size
UpperCamelCase__ : Dict = do_normalize
UpperCamelCase__ : Optional[int] = image_mean
UpperCamelCase__ : Union[str, Any] = image_std
UpperCamelCase__ : Union[str, Any] = class_info_file
UpperCamelCase__ : Tuple = prepare_metadata(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = num_text
UpperCamelCase__ : int = repo_path
# for the post_process_functions
UpperCamelCase__ : int = 2
UpperCamelCase__ : str = 10
UpperCamelCase__ : Any = 10
UpperCamelCase__ : Union[str, Any] = 3
UpperCamelCase__ : List[Any] = 4
UpperCamelCase__ : Optional[int] = num_labels
UpperCamelCase__ : Tuple = do_reduce_labels
UpperCamelCase__ : List[str] = ignore_index
def __lowercase ( self : int ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def __lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[str]=False ):
'''simple docstring'''
if not batched:
UpperCamelCase__ : str = image_inputs[0]
if isinstance(SCREAMING_SNAKE_CASE , Image.Image ):
UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = image.size
else:
UpperCamelCase__ , UpperCamelCase__ : int = image.shape[1], image.shape[2]
if w < h:
UpperCamelCase__ : Any = int(self.size["shortest_edge"] * h / w )
UpperCamelCase__ : Union[str, Any] = self.size["shortest_edge"]
elif w > h:
UpperCamelCase__ : Union[str, Any] = self.size["shortest_edge"]
UpperCamelCase__ : int = int(self.size["shortest_edge"] * w / h )
else:
UpperCamelCase__ : Optional[Any] = self.size["shortest_edge"]
UpperCamelCase__ : str = self.size["shortest_edge"]
else:
UpperCamelCase__ : Tuple = []
for image in image_inputs:
UpperCamelCase__ , UpperCamelCase__ : Tuple = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCamelCase__ : List[str] = max(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : item[0] )[0]
UpperCamelCase__ : int = max(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
def __lowercase ( self : Any ):
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class __a ( A__ , unittest.TestCase ):
_lowerCAmelCase : Tuple = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
_lowerCAmelCase : List[str] = image_processing_class
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ : Union[str, Any] = OneFormerImageProcessorTester(self )
@property
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def __lowercase ( self : str ):
'''simple docstring'''
UpperCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_mean" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "image_std" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_normalize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_resize" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "size" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "ignore_index" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "class_info_file" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "num_text" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "repo_path" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "metadata" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE , "do_reduce_labels" ) )
def __lowercase ( self : List[str] ):
'''simple docstring'''
pass
def __lowercase ( self : Tuple ):
'''simple docstring'''
UpperCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processing_tester , equal_resolution=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
UpperCamelCase__ : Optional[int] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ : Dict = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Dict = image_processor(
SCREAMING_SNAKE_CASE , ["semantic"] * len(SCREAMING_SNAKE_CASE ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowercase ( self : int ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=SCREAMING_SNAKE_CASE , numpify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
UpperCamelCase__ : Any = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = image_processor(
SCREAMING_SNAKE_CASE , ["semantic"] * len(SCREAMING_SNAKE_CASE ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCamelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ : Optional[int] = prepare_image_inputs(self.image_processing_tester , equal_resolution=SCREAMING_SNAKE_CASE , torchify=SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
UpperCamelCase__ : List[Any] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
UpperCamelCase__ , UpperCamelCase__ : Tuple = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCamelCase__ , UpperCamelCase__ : List[Any] = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE , batched=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Any = image_processor(
SCREAMING_SNAKE_CASE , ["semantic"] * len(SCREAMING_SNAKE_CASE ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowercase ( self : int , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : Any="np" ):
'''simple docstring'''
UpperCamelCase__ : Any = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
UpperCamelCase__ : Any = self.image_processing_tester.num_labels
UpperCamelCase__ : Optional[Any] = None
UpperCamelCase__ : Optional[Any] = None
UpperCamelCase__ : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=SCREAMING_SNAKE_CASE )
if with_segmentation_maps:
UpperCamelCase__ : Tuple = num_labels
if is_instance_map:
UpperCamelCase__ : List[str] = list(range(SCREAMING_SNAKE_CASE ) ) * 2
UpperCamelCase__ : Optional[Any] = dict(enumerate(SCREAMING_SNAKE_CASE ) )
UpperCamelCase__ : Union[str, Any] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs
]
if segmentation_type == "pil":
UpperCamelCase__ : List[str] = [Image.fromarray(SCREAMING_SNAKE_CASE ) for annotation in annotations]
UpperCamelCase__ : Optional[int] = image_processor(
SCREAMING_SNAKE_CASE , ["semantic"] * len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE , return_tensors="pt" , instance_id_to_semantic_id=SCREAMING_SNAKE_CASE , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE , )
return inputs
def __lowercase ( self : int ):
'''simple docstring'''
pass
def __lowercase ( self : str ):
'''simple docstring'''
def common(SCREAMING_SNAKE_CASE : Optional[int]=False , SCREAMING_SNAKE_CASE : str=None ):
UpperCamelCase__ : Any = self.comm_get_image_processor_inputs(
with_segmentation_maps=SCREAMING_SNAKE_CASE , is_instance_map=SCREAMING_SNAKE_CASE , segmentation_type=SCREAMING_SNAKE_CASE )
UpperCamelCase__ : str = inputs["mask_labels"]
UpperCamelCase__ : Optional[Any] = inputs["class_labels"]
UpperCamelCase__ : List[str] = inputs["pixel_values"]
UpperCamelCase__ : int = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=SCREAMING_SNAKE_CASE )
common(is_instance_map=SCREAMING_SNAKE_CASE , segmentation_type="pil" )
common(is_instance_map=SCREAMING_SNAKE_CASE , segmentation_type="pil" )
    def test_binary_mask_to_rle(self):
        fake_binary_mask = np.zeros((20, 50))
        fake_binary_mask[0, 20:] = 1
        fake_binary_mask[1, :15] = 1
        fake_binary_mask[5, :10] = 1

        rle = binary_mask_to_rle(fake_binary_mask)
        self.assertEqual(len(rle), 4)
        self.assertEqual(rle[0], 21)
        self.assertEqual(rle[1], 45)
    def test_post_process_semantic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_semantic_segmentation(outputs)

        self.assertEqual(len(segmentation), self.image_processing_tester.batch_size)
        self.assertEqual(
            segmentation[0].shape,
            (
                self.image_processing_tester.height,
                self.image_processing_tester.width,
            ),
        )

        target_sizes = [(1, 4) for i in range(self.image_processing_tester.batch_size)]
        segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=target_sizes)

        self.assertEqual(segmentation[0].shape, target_sizes[0])
    def test_post_process_instance_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_instance_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
    def test_post_process_panoptic_segmentation(self):
        image_processor = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file="ade20k_panoptic.json",
            num_text=self.image_processing_tester.num_text,
            repo_path="shi-labs/oneformer_demo",
        )
        outputs = self.image_processing_tester.get_fake_oneformer_outputs()
        segmentation = image_processor.post_process_panoptic_segmentation(outputs, threshold=0)

        self.assertTrue(len(segmentation) == self.image_processing_tester.batch_size)
        for el in segmentation:
            self.assertTrue("segmentation" in el)
            self.assertTrue("segments_info" in el)
            self.assertEqual(type(el["segments_info"]), list)
            self.assertEqual(
                el["segmentation"].shape, (self.image_processing_tester.height, self.image_processing_tester.width)
            )
| 189
| 1
|
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
"""4S 3H 2C 7S 5H""",
"""9D 8H 2C 6S 7H""",
"""2D 6D 9D TH 7D""",
"""TC 8C 2S JH 6C""",
"""JH 8S TH AH QH""",
"""TS KS 5S 9S AC""",
"""KD 6S 9D TH AD""",
"""KS 8D 4D 9S 4S""", # pair
"""8C 4S KH JS 4D""", # pair
"""QH 8H KD JH 8S""", # pair
"""KC 4H KS 2H 8D""", # pair
"""KD 4S KC 3H 8S""", # pair
"""AH 8S AS KC JH""", # pair
"""3H 4C 4H 3S 2H""", # 2 pairs
"""5S 5D 2C KH KH""", # 2 pairs
"""3C KH 5D 5S KH""", # 2 pairs
"""AS 3C KH AD KH""", # 2 pairs
"""7C 7S 3S 7H 5S""", # 3 of a kind
"""7C 7S KH 2H 7H""", # 3 of a kind
"""AC KH QH AH AS""", # 3 of a kind
"""2H 4D 3C AS 5S""", # straight (low ace)
"""3C 5C 4C 2C 6H""", # straight
"""6S 8S 7S 5H 9H""", # straight
"""JS QS 9H TS KH""", # straight
"""QC KH TS JS AH""", # straight (high ace)
"""8C 9C 5C 3C TC""", # flush
"""3S 8S 9S 5S KS""", # flush
"""4C 5C 9C 8C KC""", # flush
"""JH 8H AH KH QH""", # flush
"""3D 2H 3H 2C 2D""", # full house
"""2H 2C 3S 3H 3D""", # full house
"""KH KC 3S 3H 3D""", # full house
"""JC 6H JS JD JH""", # 4 of a kind
"""JC 7H JS JD JH""", # 4 of a kind
"""JC KH JS JD JH""", # 4 of a kind
"""2S AS 4S 5S 3S""", # straight flush (low ace)
"""2D 6D 3D 4D 5D""", # straight flush
"""5C 6C 3C 7C 4C""", # straight flush
"""JH 9H TH KH QH""", # straight flush
"""JH AH TH KH QH""", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("""2H 3H 4H 5H 6H""", """KS AS TS QS JS""", """Loss"""),
("""2H 3H 4H 5H 6H""", """AS AD AC AH JD""", """Win"""),
("""AS AH 2H AD AC""", """JS JD JC JH 3D""", """Win"""),
("""2S AH 2H AS AC""", """JS JD JC JH AD""", """Loss"""),
("""2S AH 2H AS AC""", """2H 3H 5H 6H 7H""", """Win"""),
("""AS 3S 4S 8S 2S""", """2H 3H 5H 6H 7H""", """Win"""),
("""2H 3H 5H 6H 7H""", """2S 3H 4H 5S 6C""", """Win"""),
("""2S 3H 4H 5S 6C""", """3D 4C 5H 6H 2S""", """Tie"""),
("""2S 3H 4H 5S 6C""", """AH AC 5H 6H AS""", """Win"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H AS""", """Loss"""),
("""2S 2H 4H 5S 4C""", """AH AC 5H 6H 7S""", """Win"""),
("""6S AD 7H 4S AS""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S AH 4H 5S KC""", """AH AC 5H 6H 7S""", """Loss"""),
("""2S 3H 6H 7S 9C""", """7H 3C TH 6H 9S""", """Loss"""),
("""4S 5H 6H TS AC""", """3S 5H 6H TS AC""", """Win"""),
("""2S AH 4H 5S 6C""", """AD 4C 5H 6H 2C""", """Tie"""),
("""AS AH 3H AD AC""", """AS AH 2H AD AC""", """Win"""),
("""AH AC 5H 5C QS""", """AH AC 5H 5C KS""", """Loss"""),
("""AH AC 5H 5C QS""", """KH KC 5H 5C QS""", """Win"""),
("""7C 7S KH 2H 7H""", """3C 3S AH 2H 3H""", """Win"""),
("""3C 3S AH 2H 3H""", """7C 7S KH 2H 7H""", """Loss"""),
("""6H 5H 4H 3H 2H""", """5H 4H 3H 2H AH""", """Win"""),
("""5H 4H 3H 2H AH""", """5H 4H 3H 2H AH""", """Tie"""),
("""5H 4H 3H 2H AH""", """6H 5H 4H 3H 2H""", """Loss"""),
("""AH AD KS KC AC""", """AH KD KH AC KC""", """Win"""),
("""2H 4D 3C AS 5S""", """2H 4D 3C 6S 5S""", """Loss"""),
("""2H 3S 3C 3H 2S""", """3S 3C 2S 2H 2D""", """Win"""),
("""4D 6D 5D 2D JH""", """3S 8S 3H TC KH""", """Loss"""),
("""4S 6C 8S 3S 7S""", """AD KS 2D 7D 7C""", """Loss"""),
("""6S 4C 7H 8C 3H""", """5H JC AH 9D 9C""", """Loss"""),
("""9D 9H JH TC QH""", """3C 2S JS 5C 7H""", """Win"""),
("""2H TC 8S AD 9S""", """4H TS 7H 2C 5C""", """Win"""),
("""9D 3S 2C 7S 7C""", """JC TD 3C TC 9H""", """Loss"""),
)
TEST_FLUSH = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", True),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", False),
("""AS 3S 4S 8S 2S""", True),
)
TEST_STRAIGHT = (
("""2H 3H 4H 5H 6H""", True),
("""AS AH 2H AD AC""", False),
("""2H 3H 5H 6H 7H""", False),
("""KS AS TS QS JS""", True),
("""8H 9H QS JS TH""", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("""2H 4D 3C AS 5S""", True, [5, 4, 3, 2, 1_4]),
("""2H 5D 3C AS 5S""", False, [1_4, 5, 5, 3, 2]),
("""JH QD KC AS TS""", False, [1_4, 1_3, 1_2, 1_1, 1_0]),
("""9D 3S 2C 7S 7C""", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("""JH AH TH KH QH""", 0),
("""JH 9H TH KH QH""", 0),
("""JC KH JS JD JH""", 7),
("""KH KC 3S 3H 3D""", 6),
("""8C 9C 5C 3C TC""", 0),
("""JS QS 9H TS KH""", 0),
("""7C 7S KH 2H 7H""", 3),
("""3C KH 5D 5S KH""", 2),
("""QH 8H KD JH 8S""", 1),
("""2D 6D 9D TH 7D""", 0),
)
TEST_TYPES = (
("""JH AH TH KH QH""", 2_3),
("""JH 9H TH KH QH""", 2_2),
("""JC KH JS JD JH""", 2_1),
("""KH KC 3S 3H 3D""", 2_0),
("""8C 9C 5C 3C TC""", 1_9),
("""JS QS 9H TS KH""", 1_8),
("""7C 7S KH 2H 7H""", 1_7),
("""3C KH 5D 5S KH""", 1_6),
("""QH 8H KD JH 8S""", 1_5),
("""2D 6D 9D TH 7D""", 1_4),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected) -> None:
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected) -> None:
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values) -> None:
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected) -> None:
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected) -> None:
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected) -> None:
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected) -> None:
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected
def test_hand_sorted() -> None:
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight() -> None:
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight() -> None:
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project() -> None:
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands_file = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands_file) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
| 352
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Greedy longest-match-first: shrink the window until a vocab hit.
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False
    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]

        del self.encoder[space_token]
        del self.encoder[line_token]

        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}

        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Tokenize a string: jieba segmentation followed by wordpiece."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
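

# Illustration of the greedy longest-match-first loop in
# `WordpieceTokenizer.tokenize` (toy vocabulary, hypothetical tokens):
#     toy = WordpieceTokenizer(vocab={"un", "believ", "able"}, unk_token="<unk>")
#     toy.tokenize("unbelievable")  # -> ['un', 'believ', 'able']
#     toy.tokenize("xyz")           # -> ['<unk>', '<unk>', '<unk>']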
| 165
| 0
|
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class Speech2TextProcessor(ProcessorMixin):
    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backwards compatibility, forward everything to the current
        # processor while inside the `as_target_processor` context manager.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 271
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data  # the value stored in this node
        self.next: Node[T] | None = None  # link to the node below it

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """LIFO stack backed by a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
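
    # Small usage demo of the linked-list stack above:
    demo = LinkedStack[int]()
    demo.push(1)
    demo.push(2)
    demo.push(3)
    print(demo)  # 3->2->1
    print(demo.pop())  # 3
    print(demo.peek())  # 2
    print(len(demo))  # 2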
| 271
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
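

# e.g. `LxmertConfig()` reproduces the defaults above, with
# `config.num_hidden_layers` being the per-encoder dict
# {"vision": 5, "cross_encoder": 5, "language": 9}.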
| 350
|
"""simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume


def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError("Invalid inputs. Enter positive value.")
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
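
    # Worked check of PV = nRT with n = 2 mol, T = 100 K, V = 5 m^3:
    print(pressure_of_gas_system(2, 100, 5))  # 2 * 100 * 8.314462 / 5 = 332.57848 Pa
    print(volume_of_gas_system(2, 100, 332.57848))  # = 5.0 m^3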
| 324
| 0
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code):
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

        # Blackify after replacement. To be able to do that, we need the header (class or function definition)
        # from the previous line
        theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
        theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
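
# For reference, the checker keys off comments of this shape (illustrative path):
#     # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
# `_re_copy_warning` captures the indent and the source object path, and the
# optional `old->new` suffix is parsed by `_re_replace_pattern`.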
| 333
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A_ : int = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Dict = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
A_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 333
| 1
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :list[list] ) -> Union[str, Any]:
'''simple docstring'''
lowercase = current_set.copy()
for row_index, row in enumerate(__SCREAMING_SNAKE_CASE ):
lowercase = row[0]
for column_index, column in enumerate(__SCREAMING_SNAKE_CASE ):
if magnitude == 0:
lowercase = column
continue
lowercase = column / magnitude
# Subtract to cancel term
lowercase = current_set[0]
lowercase = [first_row]
lowercase = current_set[1::]
for row in current_set:
lowercase = []
        # If the first term is 0, it is already in the form we want, so we preserve it
if row[0] == 0:
final_set.append(__SCREAMING_SNAKE_CASE )
continue
for column_index in range(len(__SCREAMING_SNAKE_CASE ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(__SCREAMING_SNAKE_CASE )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
lowercase = final_set[0]
lowercase = []
lowercase = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
lowercase = simplify(__SCREAMING_SNAKE_CASE )
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , __SCREAMING_SNAKE_CASE )
lowercase = resultant
return final_set
def UpperCAmelCase__ ( lowerCAmelCase__ :list[list] ) -> Any:
'''simple docstring'''
if len(__SCREAMING_SNAKE_CASE ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
lowercase = len(__SCREAMING_SNAKE_CASE ) + 1
if any(len(__SCREAMING_SNAKE_CASE ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(__SCREAMING_SNAKE_CASE , (int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(__SCREAMING_SNAKE_CASE ) == 1:
return [equations[0][-1] / equations[0][0]]
lowercase = equations.copy()
if any(0 in row for row in data_set ):
lowercase = data_set.copy()
lowercase = []
for row_index, row in enumerate(__SCREAMING_SNAKE_CASE ):
if 0 not in row:
lowercase = data_set.pop(__SCREAMING_SNAKE_CASE )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 , __SCREAMING_SNAKE_CASE )
lowercase = data_set.copy()
lowercase = simplify(__SCREAMING_SNAKE_CASE )
lowercase = simplified[::-1]
lowercase = []
for row in simplified:
lowercase = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
lowercase = row.copy()[: len(__SCREAMING_SNAKE_CASE ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(__SCREAMING_SNAKE_CASE ) == 0:
solutions.append(0 )
continue
lowercase = temp_row[1::]
lowercase = temp_row[::-1]
for column_index, column in enumerate(__SCREAMING_SNAKE_CASE ):
current_solution -= column * solutions[column_index]
solutions.append(__SCREAMING_SNAKE_CASE )
lowercase = []
for item in solutions:
final.append(float(round(__SCREAMING_SNAKE_CASE , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : str =[
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
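# Worked check (illustrative, not part of the module): summing the five equations above
# gives 6 * (x1 + ... + x5) = 30, so the variables sum to 5 and each solution value is
# rhs_i - 5; the single equation 4x = 2 solves to [0.5].
_rhs = [4, 5, 6, 7, 8]
assert [r - sum(_rhs) / 6 for r in _rhs] == [-1.0, 0.0, 1.0, 2.0, 3.0]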
| 370
|
"""simple docstring"""
def UpperCAmelCase__ ( lowerCAmelCase__ :list[list] ) -> list[list]:
'''simple docstring'''
lowercase = current_set.copy()
for row_index, row in enumerate(lowerCAmelCase__ ):
lowercase = row[0]
for column_index, column in enumerate(lowerCAmelCase__ ):
if magnitude == 0:
lowercase = column
continue
lowercase = column / magnitude
# Subtract to cancel term
lowercase = current_set[0]
lowercase = [first_row]
lowercase = current_set[1::]
for row in current_set:
lowercase = []
        # If the first term is 0, it is already in the form we want, so we preserve it
if row[0] == 0:
final_set.append(lowerCAmelCase__ )
continue
for column_index in range(len(lowerCAmelCase__ ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(lowerCAmelCase__ )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
lowercase = final_set[0]
lowercase = []
lowercase = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
lowercase = simplify(lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , lowerCAmelCase__ )
lowercase = resultant
return final_set
def UpperCAmelCase__ ( lowerCAmelCase__ :list[list] ) -> list:
'''simple docstring'''
if len(lowerCAmelCase__ ) == 0:
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
lowercase = len(lowerCAmelCase__ ) + 1
if any(len(lowerCAmelCase__ ) != _length for item in equations ):
raise IndexError("""solve_simultaneous() requires n lists of length n+1""" )
for row in equations:
if any(not isinstance(lowerCAmelCase__ , (int, float) ) for column in row ):
raise ValueError("""solve_simultaneous() requires lists of integers""" )
if len(lowerCAmelCase__ ) == 1:
return [equations[0][-1] / equations[0][0]]
lowercase = equations.copy()
if any(0 in row for row in data_set ):
lowercase = data_set.copy()
lowercase = []
for row_index, row in enumerate(lowerCAmelCase__ ):
if 0 not in row:
lowercase = data_set.pop(lowerCAmelCase__ )
break
if not full_row:
raise ValueError("""solve_simultaneous() requires at least 1 full equation""" )
data_set.insert(0 , lowerCAmelCase__ )
lowercase = data_set.copy()
lowercase = simplify(lowerCAmelCase__ )
lowercase = simplified[::-1]
lowercase = []
for row in simplified:
lowercase = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
lowercase = row.copy()[: len(lowerCAmelCase__ ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(lowerCAmelCase__ ) == 0:
solutions.append(0 )
continue
lowercase = temp_row[1::]
lowercase = temp_row[::-1]
for column_index, column in enumerate(lowerCAmelCase__ ):
current_solution -= column * solutions[column_index]
solutions.append(lowerCAmelCase__ )
lowercase = []
for item in solutions:
final.append(float(round(lowerCAmelCase__ , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCAmelCase : List[str] =[
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 32
| 0
|
class A__ :
def __init__( self : Optional[Any] , a : list ):
'''simple docstring'''
lowerCAmelCase__ : Dict = set_counts
lowerCAmelCase__ : str = max(a )
lowerCAmelCase__ : Any = len(a )
lowerCAmelCase__ : List[str] = [1] * num_sets
lowerCAmelCase__ : Dict = list(range(a ) )
def _lowerCamelCase ( self : Dict , a : int , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.get_parent(a )
lowerCAmelCase__ : Tuple = self.get_parent(a )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : str = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
lowerCAmelCase__ : List[Any] = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
lowerCAmelCase__ : Optional[int] = 0
lowerCAmelCase__ : Tuple = src_parent
lowerCAmelCase__ : Optional[int] = self.set_counts[src_parent]
lowerCAmelCase__ : Optional[Any] = max(self.max_set , a )
return True
def _lowerCamelCase ( self : Any , a : int ):
'''simple docstring'''
if self.parents[disj_set] == disj_set:
return disj_set
lowerCAmelCase__ : Tuple = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
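# A minimal, self-contained sketch (illustrative, independent of the class above) of the
# same union-by-size / path-compression idea on three singleton sets:
_parents = list(range(3))
_set_counts = [1, 1, 1]

def _demo_find(i):
    while _parents[i] != i:
        _parents[i] = _parents[_parents[i]]  # path compression: point at the grandparent
        i = _parents[i]
    return i

_src, _dst = _demo_find(0), _demo_find(2)
_set_counts[_dst] += _set_counts[_src]  # merge set 0 into set 2
_parents[_src] = _dst
assert _demo_find(0) == 2 and max(_set_counts) == 2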
| 212
|
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class A__ :
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return self.get_dummy_input()
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def _lowerCamelCase ( self : Optional[int] , a : List[Any]=True , a : Any=False , a : Dict=False , a : Union[str, Any]=False , ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 4
lowerCAmelCase__ : int = 32
lowerCAmelCase__ : Tuple = (32, 32)
lowerCAmelCase__ : List[Any] = torch.manual_seed(0 )
lowerCAmelCase__ : List[Any] = torch.device(a )
lowerCAmelCase__ : str = (batch_size, num_channels) + sizes
lowerCAmelCase__ : Tuple = randn_tensor(a , generator=a , device=a )
lowerCAmelCase__ : Optional[Any] = {'hidden_states': hidden_states}
if include_temb:
lowerCAmelCase__ : int = 128
lowerCAmelCase__ : List[str] = randn_tensor((batch_size, temb_channels) , generator=a , device=a )
if include_res_hidden_states_tuple:
lowerCAmelCase__ : int = torch.manual_seed(1 )
lowerCAmelCase__ : str = (randn_tensor(a , generator=a , device=a ),)
if include_encoder_hidden_states:
lowerCAmelCase__ : Any = floats_tensor((batch_size, 32, 32) ).to(a )
if include_skip_sample:
lowerCAmelCase__ : Union[str, Any] = randn_tensor(((batch_size, 3) + sizes) , generator=a , device=a )
return dummy_input
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
lowerCAmelCase__ : Union[str, Any] = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
lowerCAmelCase__ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self : str , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.prepare_init_args_and_inputs_for_common()
lowerCAmelCase__ : int = self.block_class(**a )
unet_block.to(a )
unet_block.eval()
with torch.no_grad():
lowerCAmelCase__ : int = unet_block(**a )
if isinstance(a , a ):
lowerCAmelCase__ : List[str] = output[0]
self.assertEqual(output.shape , self.output_shape )
lowerCAmelCase__ : List[str] = output[0, -1, -3:, -3:]
lowerCAmelCase__ : Any = torch.tensor(a ).to(a )
assert torch_all_close(output_slice.flatten() , a , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.prepare_init_args_and_inputs_for_common()
lowerCAmelCase__ : Any = self.block_class(**a )
model.to(a )
model.train()
lowerCAmelCase__ : int = model(**a )
if isinstance(a , a ):
lowerCAmelCase__ : Dict = output[0]
lowerCAmelCase__ : Optional[int] = torch.device(a )
lowerCAmelCase__ : List[Any] = randn_tensor(output.shape , device=a )
lowerCAmelCase__ : List[Any] = torch.nn.functional.mse_loss(a , a )
loss.backward()
| 212
| 1
|
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = SwinConfig(image_size=192 )
if "base" in model_name:
UpperCAmelCase_ : Dict = 6
UpperCAmelCase_ : Union[str, Any] = 128
UpperCAmelCase_ : List[str] = (2, 2, 18, 2)
UpperCAmelCase_ : Dict = (4, 8, 16, 32)
elif "large" in model_name:
UpperCAmelCase_ : Union[str, Any] = 12
UpperCAmelCase_ : Union[str, Any] = 192
UpperCAmelCase_ : Union[str, Any] = (2, 2, 18, 2)
UpperCAmelCase_ : List[str] = (6, 12, 24, 48)
else:
raise ValueError('''Model not supported, only supports base and large variants''' )
UpperCAmelCase_ : str = window_size
UpperCAmelCase_ : List[Any] = embed_dim
UpperCAmelCase_ : Tuple = depths
UpperCAmelCase_ : Optional[Any] = num_heads
return config
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
if "encoder.mask_token" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
if "encoder.patch_embed.proj" in name:
UpperCAmelCase_ : List[str] = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "encoder.patch_embed.norm" in name:
UpperCAmelCase_ : Tuple = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
if "attn.proj" in name:
UpperCAmelCase_ : int = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
UpperCAmelCase_ : Dict = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
UpperCAmelCase_ : int = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
UpperCAmelCase_ : Any = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
UpperCAmelCase_ : List[str] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace('''mlp.fc2''' , '''output.dense''' )
if name == "encoder.norm.weight":
UpperCAmelCase_ : Union[str, Any] = "layernorm.weight"
if name == "encoder.norm.bias":
UpperCAmelCase_ : List[str] = "layernorm.bias"
if "decoder" in name:
pass
else:
UpperCAmelCase_ : Tuple = "swin." + name
return name
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ : Optional[int] = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if "attn_mask" in key:
pass
elif "qkv" in key:
UpperCAmelCase_ : Optional[int] = key.split('''.''' )
UpperCAmelCase_ : List[Any] = int(key_split[2] )
UpperCAmelCase_ : Optional[Any] = int(key_split[4] )
UpperCAmelCase_ : Any = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
UpperCAmelCase_ : Tuple = val[:dim, :]
UpperCAmelCase_ : List[str] = val[
dim : dim * 2, :
]
UpperCAmelCase_ : Dict = val[-dim:, :]
else:
UpperCAmelCase_ : List[Any] = val[
:dim
]
UpperCAmelCase_ : int = val[
dim : dim * 2
]
UpperCAmelCase_ : Dict = val[
-dim:
]
else:
UpperCAmelCase_ : Optional[Any] = val
return orig_state_dict
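# A minimal, self-contained sketch (not part of the conversion script) of the qkv split
# performed above: a fused attention projection of shape (3 * dim, dim) is cut into
# three (dim, dim) blocks for query, key and value. Sizes below are illustrative.
import torch as _torch_demo

_dim = 4
_fused_qkv = _torch_demo.arange(3 * _dim * _dim, dtype=_torch_demo.float32).reshape(3 * _dim, _dim)
_q_w = _fused_qkv[:_dim, :]            # first third  -> query weight
_k_w = _fused_qkv[_dim : _dim * 2, :]  # middle third -> key weight
_v_w = _fused_qkv[-_dim:, :]           # last third   -> value weight
assert _q_w.shape == _k_w.shape == _v_w.shape == (_dim, _dim)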
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = torch.load(_SCREAMING_SNAKE_CASE , map_location='''cpu''' )["model"]
UpperCAmelCase_ : List[str] = get_swin_config(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = SwinForMaskedImageModeling(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase_ : Union[str, Any] = convert_state_dict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ : Any = ViTImageProcessor(size={'''height''': 192, '''width''': 192} )
UpperCAmelCase_ : List[str] = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
UpperCAmelCase_ : List[str] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' )
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**_SCREAMING_SNAKE_CASE ).logits
print(outputs.keys() )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print(f'''Pushing model and image processor for {model_name} to hub''' )
model.push_to_hub(f'''microsoft/{model_name}''' )
image_processor.push_to_hub(f'''microsoft/{model_name}''' )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='swin-base-simmim-window6-192',
type=str,
choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
help='Name of the Swin SimMIM model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path',
default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__a = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 351
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class __a( _a ):
"""simple docstring"""
lowerCAmelCase = 42
class __a( _a , _a ):
"""simple docstring"""
lowerCAmelCase = True
@register_to_config
def __init__( self ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = 3 ,_SCREAMING_SNAKE_CASE = ("DownEncoderBlock2D",) ,_SCREAMING_SNAKE_CASE = ("UpDecoderBlock2D",) ,_SCREAMING_SNAKE_CASE = (64,) ,_SCREAMING_SNAKE_CASE = 1 ,_SCREAMING_SNAKE_CASE = "silu" ,_SCREAMING_SNAKE_CASE = 4 ,_SCREAMING_SNAKE_CASE = 32 ,_SCREAMING_SNAKE_CASE = 32 ,_SCREAMING_SNAKE_CASE = 0.1_82_15 ,) -> Optional[int]:
super().__init__()
# pass init params to Encoder
UpperCAmelCase_ : List[Any] = Encoder(
in_channels=_SCREAMING_SNAKE_CASE ,out_channels=_SCREAMING_SNAKE_CASE ,down_block_types=_SCREAMING_SNAKE_CASE ,block_out_channels=_SCREAMING_SNAKE_CASE ,layers_per_block=_SCREAMING_SNAKE_CASE ,act_fn=_SCREAMING_SNAKE_CASE ,norm_num_groups=_SCREAMING_SNAKE_CASE ,double_z=_SCREAMING_SNAKE_CASE ,)
# pass init params to Decoder
UpperCAmelCase_ : List[str] = Decoder(
in_channels=_SCREAMING_SNAKE_CASE ,out_channels=_SCREAMING_SNAKE_CASE ,up_block_types=_SCREAMING_SNAKE_CASE ,block_out_channels=_SCREAMING_SNAKE_CASE ,layers_per_block=_SCREAMING_SNAKE_CASE ,norm_num_groups=_SCREAMING_SNAKE_CASE ,act_fn=_SCREAMING_SNAKE_CASE ,)
UpperCAmelCase_ : int = nn.Convad(2 * latent_channels ,2 * latent_channels ,1 )
UpperCAmelCase_ : Union[str, Any] = nn.Convad(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,1 )
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : Dict = False
# only relevant if vae tiling is enabled
UpperCAmelCase_ : List[Any] = self.config.sample_size
UpperCAmelCase_ : List[str] = (
self.config.sample_size[0]
if isinstance(self.config.sample_size ,(list, tuple) )
else self.config.sample_size
)
UpperCAmelCase_ : List[str] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
UpperCAmelCase_ : int = 0.25
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> int:
if isinstance(_SCREAMING_SNAKE_CASE ,(Encoder, Decoder) ):
UpperCAmelCase_ : Union[str, Any] = value
def a__ ( self ,_SCREAMING_SNAKE_CASE = True ) -> Optional[Any]:
UpperCAmelCase_ : Dict = use_tiling
def a__ ( self ) -> Optional[Any]:
self.enable_tiling(_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : str = True
def a__ ( self ) -> Any:
UpperCAmelCase_ : Any = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def a__ ( self ) -> Dict[str, AttentionProcessor]:
UpperCAmelCase_ : int = {}
def fn_recursive_add_processors(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
if hasattr(_SCREAMING_SNAKE_CASE ,'''set_processor''' ):
UpperCAmelCase_ : Tuple = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
return processors
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = len(self.attn_processors.keys() )
if isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) and len(_SCREAMING_SNAKE_CASE ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(_SCREAMING_SNAKE_CASE )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
if hasattr(_SCREAMING_SNAKE_CASE ,'''set_processor''' ):
if not isinstance(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
module.set_processor(_SCREAMING_SNAKE_CASE )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
for name, module in self.named_children():
fn_recursive_attn_processor(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Tuple:
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = True ) -> AutoencoderKLOutput:
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE )
if self.use_slicing and x.shape[0] > 1:
UpperCAmelCase_ : int = [self.encoder(_SCREAMING_SNAKE_CASE ) for x_slice in x.split(1 )]
UpperCAmelCase_ : List[str] = torch.cat(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : Optional[Any] = self.encoder(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = self.quant_conv(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = DiagonalGaussianDistribution(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(_SCREAMING_SNAKE_CASE ,return_dict=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = self.post_quant_conv(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = self.decoder(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_SCREAMING_SNAKE_CASE )
@apply_forward_hook
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = True ) -> Union[DecoderOutput, torch.FloatTensor]:
if self.use_slicing and z.shape[0] > 1:
UpperCAmelCase_ : Tuple = [self._decode(_SCREAMING_SNAKE_CASE ).sample for z_slice in z.split(1 )]
UpperCAmelCase_ : str = torch.cat(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : str = self._decode(_SCREAMING_SNAKE_CASE ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ : str = min(a.shape[2] ,b.shape[2] ,_SCREAMING_SNAKE_CASE )
for y in range(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : str = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ : Any = min(a.shape[3] ,b.shape[3] ,_SCREAMING_SNAKE_CASE )
for x in range(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : List[str] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = True ) -> AutoencoderKLOutput:
UpperCAmelCase_ : Optional[int] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : Any = int(self.tile_latent_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : str = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
UpperCAmelCase_ : Tuple = []
for i in range(0 ,x.shape[2] ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : List[str] = []
for j in range(0 ,x.shape[3] ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : List[str] = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
UpperCAmelCase_ : int = self.encoder(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = self.quant_conv(_SCREAMING_SNAKE_CASE )
row.append(_SCREAMING_SNAKE_CASE )
rows.append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = []
for i, row in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : str = []
for j, tile in enumerate(_SCREAMING_SNAKE_CASE ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Optional[Any] = self.blend_v(rows[i - 1][j] ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if j > 0:
UpperCAmelCase_ : List[str] = self.blend_h(row[j - 1] ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_SCREAMING_SNAKE_CASE ,dim=3 ) )
UpperCAmelCase_ : Optional[Any] = torch.cat(_SCREAMING_SNAKE_CASE ,dim=2 )
UpperCAmelCase_ : Union[str, Any] = DiagonalGaussianDistribution(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = True ) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : Optional[Any] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
UpperCAmelCase_ : str = int(self.tile_sample_min_size * self.tile_overlap_factor )
UpperCAmelCase_ : Any = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
UpperCAmelCase_ : Tuple = []
for i in range(0 ,z.shape[2] ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Tuple = []
for j in range(0 ,z.shape[3] ,_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Optional[int] = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
UpperCAmelCase_ : List[str] = self.post_quant_conv(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = self.decoder(_SCREAMING_SNAKE_CASE )
row.append(_SCREAMING_SNAKE_CASE )
rows.append(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = []
for i, row in enumerate(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : int = []
for j, tile in enumerate(_SCREAMING_SNAKE_CASE ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
UpperCAmelCase_ : Optional[int] = self.blend_v(rows[i - 1][j] ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
if j > 0:
UpperCAmelCase_ : Optional[int] = self.blend_h(row[j - 1] ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(_SCREAMING_SNAKE_CASE ,dim=3 ) )
UpperCAmelCase_ : Dict = torch.cat(_SCREAMING_SNAKE_CASE ,dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE = False ,_SCREAMING_SNAKE_CASE = True ,_SCREAMING_SNAKE_CASE = None ,) -> Union[DecoderOutput, torch.FloatTensor]:
UpperCAmelCase_ : str = sample
UpperCAmelCase_ : Optional[Any] = self.encode(_SCREAMING_SNAKE_CASE ).latent_dist
if sample_posterior:
UpperCAmelCase_ : List[Any] = posterior.sample(generator=_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : Union[str, Any] = posterior.mode()
UpperCAmelCase_ : List[Any] = self.decode(_SCREAMING_SNAKE_CASE ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=_SCREAMING_SNAKE_CASE )
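# A minimal, self-contained sketch (illustrative, not part of the model) of the seam
# blending used by blend_v/blend_h above: across `blend_extent` rows the weight on the
# previous tile ramps from 1 down to 0 while the weight on the current tile ramps up.
import torch as _torch_blend

_prev = _torch_blend.zeros(1, 1, 4, 1)
_curr = _torch_blend.ones(1, 1, 4, 1)
_blend_extent = 4
for _y in range(_blend_extent):
    _curr[:, :, _y, :] = _prev[:, :, -_blend_extent + _y, :] * (1 - _y / _blend_extent) + _curr[:, :, _y, :] * (
        _y / _blend_extent
    )
assert [round(_curr[0, 0, _y, 0].item(), 2) for _y in range(4)] == [0.0, 0.25, 0.5, 0.75]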
| 235
| 0
|
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
A_ : List[str] = logging.get_logger(__name__)
A_ : List[str] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
A_ : List[Any] = {
"vocab_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
},
"merges_file": {
"allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
},
}
A_ : str = {"allegro/herbert-base-cased": 514}
A_ : Dict = {}
class lowerCamelCase (A__ ):
lowerCamelCase__ : List[str] = VOCAB_FILES_NAMES
lowerCamelCase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ : Optional[Any] = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase__ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ : str = HerbertTokenizer
def __init__( self : Tuple , __UpperCAmelCase : int=None , __UpperCAmelCase : Any=None , __UpperCAmelCase : str=None , __UpperCAmelCase : Union[str, Any]="<s>" , __UpperCAmelCase : str="<unk>" , __UpperCAmelCase : Union[str, Any]="<pad>" , __UpperCAmelCase : Dict="<mask>" , __UpperCAmelCase : Optional[Any]="</s>" , **__UpperCAmelCase : List[Any] , ) -> Any:
super().__init__(
__UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , cls_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , **__UpperCAmelCase , )
def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self : int , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1]
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
SCREAMING_SNAKE_CASE__ = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
SCREAMING_SNAKE_CASE__ = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase )
return tuple(__UpperCAmelCase )
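# Illustrative layout check (not part of the tokenizer): a single sequence is wrapped as
# [CLS] tokens [SEP]; a pair as [CLS] tokens_0 [SEP] tokens_1 [SEP], with token type ids
# of 0 for the first segment and 1 for the second. Ids below are hypothetical.
_cls, _sep = [0], [1]
_tokens_0, _tokens_1 = [10, 11], [20]
assert _cls + _tokens_0 + _sep == [0, 10, 11, 1]
assert len(_cls + _tokens_0 + _sep) * [0] + len(_tokens_1 + _sep) * [1] == [0, 0, 0, 0, 1, 1]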
| 165
|
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class lowerCamelCase (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
SCREAMING_SNAKE_CASE__ = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
SCREAMING_SNAKE_CASE__ = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""" )
model.to(__UpperCAmelCase )
from datasets import load_dataset
SCREAMING_SNAKE_CASE__ = load_dataset("""nielsr/rvlcdip-demo""" )
SCREAMING_SNAKE_CASE__ = dataset["""train"""][0]["""image"""].convert("""RGB""" )
SCREAMING_SNAKE_CASE__ = image_processor(__UpperCAmelCase , return_tensors="""pt""" ).to(__UpperCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = outputs.logits
SCREAMING_SNAKE_CASE__ = torch.Size((1, 1_6) )
self.assertEqual(logits.shape , __UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[-0.4_158, -0.4_092, -0.4_347] , device=__UpperCAmelCase , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1e-4 ) )
| 165
| 1
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__lowercase = '''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__lowercase = concatenate_datasets
__lowercase = DownloadConfig
__lowercase = DownloadManager
__lowercase = DownloadMode
__lowercase = DownloadConfig
__lowercase = DownloadMode
__lowercase = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 354
|
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
if not grid or not grid[0]:
raise TypeError('''The grid does not contain the appropriate information''' )
for cell_n in range(1 , len(grid[0] ) ):
grid[0][cell_n] += grid[0][cell_n - 1]
__UpperCamelCase :Union[str, Any] = grid[0]
for row_n in range(1 , len(SCREAMING_SNAKE_CASE ) ):
__UpperCamelCase :Optional[int] = grid[row_n]
__UpperCamelCase :Dict = fill_row(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__UpperCamelCase :Any = grid[row_n]
return grid[-1][-1]
def lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
current_row[0] += row_above[0]
for cell_n in range(1 , len(SCREAMING_SNAKE_CASE ) ):
current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] )
return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
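# Worked example (illustrative, not part of the module) of the row-filling rule above:
# each cell becomes its own cost plus the cheaper of the cell to its left and the cell
# directly above (_row_above holds already-accumulated costs from the previous row).
_row_above = [1, 4, 5]
_current_row = [1, 5, 1]
_current_row[0] += _row_above[0]
for _cell_n in range(1, len(_current_row)):
    _current_row[_cell_n] += min(_current_row[_cell_n - 1], _row_above[_cell_n])
assert _current_row == [2, 7, 6]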
| 105
| 0
|
"""simple docstring"""
import numpy as np
def a__ ( SCREAMING_SNAKE_CASE : np.array ):
'''simple docstring'''
return 1 / (1 + np.exp(-vector ))
def a__ ( SCREAMING_SNAKE_CASE : np.array ):
'''simple docstring'''
return vector * sigmoid(1.702 * vector )
if __name__ == "__main__":
import doctest
doctest.testmod()
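# Quick numeric check (illustrative, not part of the module): sigmoid(0) is 0.5, and the
# sigmoid-based GELU approximation x * sigmoid(1.702 * x) is 0 at x = 0 and close to x
# for large positive x.
import numpy as _np_demo

_v = _np_demo.array([-10.0, 0.0, 10.0])
_sig = 1 / (1 + _np_demo.exp(-_v))
_gelu_approx = _v * (1 / (1 + _np_demo.exp(-1.702 * _v)))
assert abs(_sig[1] - 0.5) < 1e-9 and abs(_gelu_approx[1]) < 1e-9
assert abs(_gelu_approx[2] - 10.0) < 1e-3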
| 108
|
'''simple docstring'''
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
lowercase__ : Any = logging.get_logger(__name__)
class __lowerCAmelCase :
"""simple docstring"""
_snake_case : List[str] = None
@experimental
def a__ ( lowercase : Union[str, Any], lowercase : Optional[int], lowercase : Tuple, lowercase : List[Any], lowercase : Dict, lowercase : Union[str, Any], lowercase : Optional[Any] ) -> int:
"""simple docstring"""
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
lowercase, lowercase, lowercase, lowercase, lowercase, lowercase, lowercase )
return _map_with_joblib(lowercase, lowercase, lowercase, lowercase, lowercase, lowercase, lowercase )
def a__ ( lowercase : Dict, lowercase : str, lowercase : Union[str, Any], lowercase : Optional[Any], lowercase : Optional[int], lowercase : Optional[Any], lowercase : Optional[int] ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = num_proc if num_proc <= len(lowercase ) else len(lowercase )
    _UpperCamelCase = [] # We organize the splits ourselves (contiguous splits)
for index in range(lowercase ):
_UpperCamelCase = len(lowercase ) // num_proc
_UpperCamelCase = len(lowercase ) % num_proc
_UpperCamelCase = div * index + min(lowercase, lowercase )
_UpperCamelCase = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(lowercase ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
F"""Error dividing inputs iterable among processes. """
F"""Total number of objects {len(lowercase )}, """
F"""length: {sum(len(i[1] ) for i in split_kwds )}""" )
logger.info(
F"""Spawning {num_proc} processes for {len(lowercase )} objects in slices of {[len(i[1] ) for i in split_kwds]}""" )
_UpperCamelCase , _UpperCamelCase = None, None
if not disable_tqdm:
_UpperCamelCase , _UpperCamelCase = (RLock(),), tqdm.set_lock
with Pool(lowercase, initargs=lowercase, initializer=lowercase ) as pool:
_UpperCamelCase = pool.map(lowercase, lowercase )
logger.info(F"""Finished {num_proc} processes""" )
_UpperCamelCase = [obj for proc_res in mapped for obj in proc_res]
logger.info(F"""Unpacked {len(lowercase )} objects""" )
return mapped
def a__ ( lowercase : str, lowercase : Tuple, lowercase : List[str], lowercase : List[str], lowercase : Any, lowercase : int, lowercase : Optional[Any] ) -> Any:
"""simple docstring"""
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=lowercase ):
return joblib.Parallel()(
joblib.delayed(lowercase )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def a__ ( lowercase : str ) -> Optional[int]:
"""simple docstring"""
_UpperCamelCase = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
_UpperCamelCase = None
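# A minimal, self-contained sketch (illustrative) of the contiguous splitting computed
# in the loop above (start = div * index + min(index, mod)): len(items) // num_proc
# items per worker, with the first len(items) % num_proc workers taking one extra item.
def _demo_contiguous_splits(items, num_proc):
    div, mod = divmod(len(items), num_proc)
    splits = []
    for index in range(num_proc):
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        splits.append(items[start:end])
    return splits

assert _demo_contiguous_splits(list(range(7)), 3) == [[0, 1, 2], [3, 4], [5, 6]]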
| 324
| 0
|
"""simple docstring"""
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def lowercase ( _SCREAMING_SNAKE_CASE : Dict[str, torch.Tensor] ):
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = []
for rt in rc.restypes:
_UpperCAmelCase = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names] )
_UpperCAmelCase = {name: i for i, name in enumerate(_SCREAMING_SNAKE_CASE )}
restype_atomaa_to_atomaa_list.append(
[(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types] )
restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names] )
# Add dummy mapping for restype 'UNK'
restype_atomaa_to_atomaa_list.append([0] * 14 )
restype_atomaa_to_atomaa_list.append([0] * 37 )
restype_atomaa_mask_list.append([0.0] * 14 )
_UpperCAmelCase = torch.tensor(
_SCREAMING_SNAKE_CASE , dtype=torch.intaa , device=protein['''aatype'''].device , )
_UpperCAmelCase = torch.tensor(
_SCREAMING_SNAKE_CASE , dtype=torch.intaa , device=protein['''aatype'''].device , )
_UpperCAmelCase = torch.tensor(
_SCREAMING_SNAKE_CASE , dtype=torch.floataa , device=protein['''aatype'''].device , )
_UpperCAmelCase = protein['''aatype'''].to(torch.long )
# create the mapping for (residx, atom14) --> atom37, i.e. an array
# with shape (num_res, 14) containing the atom37 indices for this protein
_UpperCAmelCase = restype_atomaa_to_atomaa[protein_aatype]
_UpperCAmelCase = restype_atomaa_mask[protein_aatype]
_UpperCAmelCase = residx_atomaa_mask
_UpperCAmelCase = residx_atomaa_to_atomaa.long()
# create the gather indices for mapping back
_UpperCAmelCase = restype_atomaa_to_atomaa[protein_aatype]
_UpperCAmelCase = residx_atomaa_to_atomaa.long()
# create the corresponding mask
_UpperCAmelCase = torch.zeros([21, 37] , dtype=torch.floataa , device=protein['''aatype'''].device )
for restype, restype_letter in enumerate(rc.restypes ):
_UpperCAmelCase = rc.restype_atoa[restype_letter]
_UpperCAmelCase = rc.residue_atoms[restype_name]
for atom_name in atom_names:
_UpperCAmelCase = rc.atom_order[atom_name]
_UpperCAmelCase = 1
_UpperCAmelCase = restype_atomaa_mask[protein_aatype]
_UpperCAmelCase = residx_atomaa_mask
return protein
def lowercase ( _SCREAMING_SNAKE_CASE : Dict[str, torch.Tensor] ):
'''simple docstring'''
_UpperCAmelCase = tree_map(lambda _SCREAMING_SNAKE_CASE : torch.tensor(_SCREAMING_SNAKE_CASE , device=batch['''aatype'''].device ) , _SCREAMING_SNAKE_CASE , np.ndarray )
_UpperCAmelCase = tensor_tree_map(lambda _SCREAMING_SNAKE_CASE : np.array(_SCREAMING_SNAKE_CASE ) , make_atomaa_masks(_SCREAMING_SNAKE_CASE ) )
return out
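# A minimal sketch (illustrative, not from the module above) of the per-residue lookup
# pattern used there: indexing a (num_restypes, num_atoms) table with a per-residue
# type vector produces a (num_res, num_atoms) map in a single gather.
import torch as _torch_lut

_table = _torch_lut.tensor([[0, 1], [2, 3], [4, 5]])  # one row per residue type
_aatype = _torch_lut.tensor([2, 0, 0, 1])             # residue type at each position
assert _table[_aatype].tolist() == [[4, 5], [0, 1], [0, 1], [2, 3]]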
| 326
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _a ( lowerCAmelCase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = CTRLTokenizer
UpperCamelCase__ = False
UpperCamelCase__ = False
def lowercase__ ( self : Dict )->str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_UpperCAmelCase = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
_UpperCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
_UpperCAmelCase = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
_UpperCAmelCase = {'''unk_token''': '''<unk>'''}
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__UpperCamelCase ) )
def lowercase__ ( self : str , **__UpperCamelCase : Union[str, Any] )->Any:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def lowercase__ ( self : Union[str, Any] , __UpperCamelCase : Optional[int] )->Tuple:
_UpperCAmelCase = '''adapt react readapt apt'''
_UpperCAmelCase = '''adapt react readapt apt'''
return input_text, output_text
def lowercase__ ( self : Dict )->Optional[int]:
_UpperCAmelCase = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCAmelCase = '''adapt react readapt apt'''
_UpperCAmelCase = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
_UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = tokens + [tokenizer.unk_token]
_UpperCAmelCase = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
| 326
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__A =logging.get_logger(__name__)
__A ={'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__A ={
'''vocab_file''': {'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'''},
'''tokenizer_file''': {
'''mobilebert-uncased''': '''https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json'''
},
}
__A ={'''mobilebert-uncased''': 5_1_2}
__A ={}
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = MobileBertTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=True , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , lowercase=True , lowercase=None , **lowercase , ) -> Union[str, Any]:
super().__init__(
lowercase , tokenizer_file=lowercase , do_lower_case=lowercase , unk_token=lowercase , sep_token=lowercase , pad_token=lowercase , cls_token=lowercase , mask_token=lowercase , tokenize_chinese_chars=lowercase , strip_accents=lowercase , **lowercase , )
lowerCamelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowercase ) != do_lower_case
or normalizer_state.get("strip_accents" , lowercase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowercase ) != tokenize_chinese_chars
):
lowerCamelCase_ = getattr(lowercase , normalizer_state.pop("type" ) )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = strip_accents
lowerCamelCase_ = tokenize_chinese_chars
lowerCamelCase_ = normalizer_class(**lowercase )
lowerCamelCase_ = do_lower_case
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase=None ) -> str:
lowerCamelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> List[int]:
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None ) -> Tuple[str]:
lowerCamelCase_ = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
| 19
|
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def SCREAMING_SNAKE_CASE_ ( ) -> Any:
"""simple docstring"""
a_ : Optional[Any] = HfArgumentParser(__A )
a_ : Optional[int] = parser.parse_args_into_dataclasses()[0]
a_ : List[Any] = TensorFlowBenchmark(args=__A )
try:
a_ : List[str] = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
a_ : Dict = 'Arg --no_{0} is no longer used, please use --no-{0} instead.'
a_ : Dict = ' '.join(str(__A ).split(' ' )[:-1] )
a_ : int = ''
a_ : int = eval(str(__A ).split(' ' )[-1] )
a_ : Any = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__A )
if len(__A ) > 0:
a_ : str = full_error_msg + begin_error_msg + str(__A )
raise ValueError(__A )
benchmark.run()
if __name__ == "__main__":
main()
| 32
| 0
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class lowerCAmelCase ( pl.LightningModule ):
def __init__( self : List[str] , UpperCAmelCase : Optional[Any] ) -> int:
super().__init__()
lowerCamelCase__ : List[str] = model
lowerCamelCase__ : Dict = 2
lowerCamelCase__ : Dict = nn.Linear(self.model.config.hidden_size , self.num_labels )
def A_ ( self : Optional[Any] ) -> Optional[Any]:
pass
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
# load longformer model from model identifier
lowerCamelCase__ : List[str] = LongformerModel.from_pretrained(_UpperCAmelCase )
lowerCamelCase__ : Dict = LightningModel(_UpperCAmelCase )
lowerCamelCase__ : List[Any] = torch.load(_UpperCAmelCase , map_location=torch.device('cpu' ) )
lightning_model.load_state_dict(ckpt['state_dict'] )
# init longformer question answering model
lowerCamelCase__ : Dict = LongformerForQuestionAnswering.from_pretrained(_UpperCAmelCase )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_UpperCAmelCase )
print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
_UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--longformer_model""",
default=None,
type=str,
required=True,
help="""model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.""",
)
parser.add_argument(
"""--longformer_question_answering_ckpt_path""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch Lightning Checkpoint.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_UpperCAmelCase : Any = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 45
|
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str:
if number > 0:
raise ValueError('input must be a negative integer' )
lowerCamelCase__ : str = len(bin(_UpperCAmelCase )[3:] )
lowerCamelCase__ : Dict = bin(abs(_UpperCAmelCase ) - (1 << binary_number_length) )[3:]
lowerCamelCase__ : Optional[int] = (
(
'1'
+ '0' * (binary_number_length - len(_UpperCAmelCase ))
+ twos_complement_number
)
if number < 0
else '0'
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
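# Worked example (illustrative, not part of the module): the 4-bit two's complement of
# -5 is 0b1011 -- abs(-5) needs 3 bits, bin(5 - 2**3) yields the low bits '11', and a
# leading sign bit plus zero padding completes the pattern.
assert bin(abs(-5) - (1 << 3))[3:] == "11"
assert "0b" + "1" + "0" * (3 - len("11")) + "11" == "0b1011"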
| 45
| 1
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_a = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt')
@dataclass
class A_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[str] = field(
default="""cifar10""" ,metadata={"""help""": """Name of a dataset from the datasets package"""} )
SCREAMING_SNAKE_CASE__ : Optional[str] = field(
default=lowercase__ ,metadata={"""help""": """The configuration name of the dataset to use (via the datasets library)."""} )
SCREAMING_SNAKE_CASE__ : Optional[str] = field(
default=lowercase__ ,metadata={"""help""": """The column name of the images in the files."""} )
SCREAMING_SNAKE_CASE__ : Optional[str] = field(default=lowercase__ ,metadata={"""help""": """A folder containing the training data."""} )
SCREAMING_SNAKE_CASE__ : Optional[str] = field(default=lowercase__ ,metadata={"""help""": """A folder containing the validation data."""} )
SCREAMING_SNAKE_CASE__ : Optional[float] = field(
default=0.1_5 ,metadata={"""help""": """Percent to split off of train for validation."""} )
SCREAMING_SNAKE_CASE__ : Optional[int] = field(
default=lowercase__ ,metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of training examples to this """
"""value if set."""
)
} ,)
SCREAMING_SNAKE_CASE__ : Optional[int] = field(
default=lowercase__ ,metadata={
"""help""": (
"""For debugging purposes or quicker training, truncate the number of evaluation examples to this """
"""value if set."""
)
} ,)
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = {}
if self.train_dir is not None:
UpperCAmelCase_ : Optional[int] = self.train_dir
if self.validation_dir is not None:
UpperCAmelCase_ : List[str] = self.validation_dir
UpperCAmelCase_ : List[str] = data_files if data_files else None
@dataclass
class A_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = field(
default=lowercase__ ,metadata={
"""help""": (
"""The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."""
)
} ,)
SCREAMING_SNAKE_CASE__ : Optional[str] = field(
default=lowercase__ ,metadata={"""help""": """Pretrained config name or path if not the same as model_name_or_path"""} )
SCREAMING_SNAKE_CASE__ : Optional[str] = field(
default=lowercase__ ,metadata={
"""help""": (
"""Override some existing default config settings when a model is trained from scratch. Example: """
"""n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"""
)
} ,)
SCREAMING_SNAKE_CASE__ : Optional[str] = field(
default=lowercase__ ,metadata={"""help""": """Where do you want to store the pretrained models downloaded from s3"""} )
SCREAMING_SNAKE_CASE__ : str = field(
default="""main""" ,metadata={"""help""": """The specific model version to use (can be a branch name, tag name or commit id)."""} ,)
SCREAMING_SNAKE_CASE__ : str = field(default=lowercase__ ,metadata={"""help""": """Name or path of preprocessor config."""} )
SCREAMING_SNAKE_CASE__ : bool = field(
default=lowercase__ ,metadata={
"""help""": (
"""Will use the token generated when running `huggingface-cli login` (necessary to use this script """
"""with private models)."""
)
} ,)
SCREAMING_SNAKE_CASE__ : float = field(
default=0.7_5 ,metadata={"""help""": """The ratio of the number of masked tokens in the input sequence."""} )
SCREAMING_SNAKE_CASE__ : bool = field(
default=lowercase__ ,metadata={"""help""": """Whether or not to train with normalized pixel values as target."""} )
@dataclass
class A_ (lowercase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : float = field(
default=1e-3 ,metadata={"""help""": """Base learning rate: absolute_lr = base_lr * total_batch_size / 256."""} )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : int = torch.stack([example["pixel_values"] for example in examples] )
return {"pixel_values": pixel_values}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_mae", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds["train"].train_test_split(data_args.train_val_split)
        ds["train"] = split["train"]
        ds["validation"] = split["test"]

    # Load pretrained model and image processor
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = ViTMAEConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = ViTMAEConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = ViTMAEConfig()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    # adapt config
    config.update(
        {
            "mask_ratio": model_args.mask_ratio,
            "norm_pix_loss": model_args.norm_pix_loss,
        }
    )
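    # With the default mask_ratio of 0.75, three quarters of the patches are hidden from the
    # encoder and must be reconstructed by the decoder. norm_pix_loss computes the loss against
    # per-patch-normalized pixels, which the MAE paper reports improves representation quality.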
    # create image processor
    if model_args.image_processor_name:
        image_processor = ViTImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = ViTImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        image_processor = ViTImageProcessor()

    # create model
    if model_args.model_name_or_path:
        model = ViTMAEForPreTraining.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info("Training new model from scratch")
        model = ViTMAEForPreTraining(config)

    if training_args.do_train:
        column_names = ds["train"].column_names
    else:
        column_names = ds["validation"].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif "image" in column_names:
        image_column_name = "image"
    elif "img" in column_names:
        image_column_name = "img"
    else:
        image_column_name = column_names[0]

    # transformations as done in original MAE paper
    # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
    if "shortest_edge" in image_processor.size:
        size = image_processor.size["shortest_edge"]
    else:
        size = (image_processor.size["height"], image_processor.size["width"])
    transforms = Compose(
        [
            Lambda(lambda img: img.convert("RGB") if img.mode != "RGB" else img),
            RandomResizedCrop(size, scale=(0.2, 1.0), interpolation=InterpolationMode.BICUBIC),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )
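    # The aggressive RandomResizedCrop (scale 0.2-1.0) plus a horizontal flip is the only
    # augmentation used here, following the MAE paper: random patch masking already acts as a
    # strong regularizer, so heavier augmentation is unnecessary.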
    def preprocess_images(examples):
        """Preprocess a batch of images by applying transforms."""
        examples["pixel_values"] = [transforms(image) for image in examples[image_column_name]]
        return examples

    if training_args.do_train:
        if "train" not in ds:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            ds["train"] = ds["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds["train"].set_transform(preprocess_images)

    if training_args.do_eval:
        if "validation" not in ds:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            ds["validation"] = (
                ds["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds["validation"].set_transform(preprocess_images)

    # Compute absolute learning rate
    total_train_batch_size = (
        training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
    )
    if training_args.base_learning_rate is not None:
        training_args.learning_rate = training_args.base_learning_rate * total_train_batch_size / 256
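    # Linear LR scaling rule, worked through: with base_learning_rate=1e-3, a per-device batch
    # size of 64, 2 gradient-accumulation steps and 4 processes, total_train_batch_size is
    # 64 * 2 * 4 = 512, so the absolute learning rate becomes 1e-3 * 512 / 256 = 2e-3.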
    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds["train"] if training_args.do_train else None,
        eval_dataset=ds["validation"] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "tasks": "masked-auto-encoding",
        "dataset": data_args.dataset_name,
        "tags": ["masked-auto-encoding"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
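# Illustrative invocation (the dataset name is only an example; flags follow the usual
# HF example-script conventions):
#   python run_mae.py --dataset_name cifar10 --output_dir ./vit-mae-demo \
#       --do_train --do_eval --base_learning_rate 1.5e-4 --mask_ratio 0.75 \
#       --overwrite_output_dir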
| 61
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
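# Minimal usage sketch (assumes the classes above are importable from `transformers`):
#   config = RoFormerConfig(num_hidden_layers=6)
#   onnx_config = RoFormerOnnxConfig(config)
#   list(onnx_config.inputs)  # ['input_ids', 'attention_mask', 'token_type_ids']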
| 235
| 0
|
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
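# These tests follow the shared TokenizerTesterMixin harness; they can be run with pytest
# against the file's location in the test tree (the exact path depends on the repository layout).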
| 351
|
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
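# The generator consumes a single flattened string: the question followed by the retrieved
# passages joined with "<P>" separators, mirroring the formatting used when the seq2seq
# model was fine-tuned.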
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # `support_list` is the module-level list built in the UI block below
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")

# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = '\n<html>\n  <head>\n    <style>\n      .img-container {\n        padding-left: 90px;\n        padding-right: 90px;\n        padding-top: 50px;\n        padding-bottom: 50px;\n        background-color: #f0f3f9;\n      }\n    </style>\n  </head>\n  <body>\n    <span class="img-container"> <!-- Inline parent element -->\n      %s\n    </span>\n  </body>\n</html>\n' % (
    header_html,
)
st.sidebar.markdown(
    header_full,
    unsafe_allow_html=True,
)

# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True

retrieval_options = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
    retriever_info = "\n    ### Information retriever options\n\n    The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n    trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n    The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n    "
    st.sidebar.markdown(retriever_info)
    wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
    index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
    wiki_source = "wiki40b"
    index_type = "dense"

sampled = "beam"
n_beams = 2
min_len = 64
max_len = 256
top_p = None
temp = None
generate_options = st.sidebar.checkbox("Generation options")
if generate_options:
    generate_info = "\n    ### Answer generation options\n\n    The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n    weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n    **beam** search, or **sample** from the decoder's output probabilities.\n    "
    st.sidebar.markdown(generate_info)
    sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
    min_len = st.sidebar.slider(
        "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
    )
    max_len = st.sidebar.slider(
        "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
    )
    if sampled == "beam":
        n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
    else:
        top_p = st.sidebar.slider(
            "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
        )
        temp = st.sidebar.slider(
            "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
        )
        n_beams = None
# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
    if action in [0, 1, 3]:
        if index_type == "mixed":
            _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
            question_doc,
            sas_model,
            sas_tokenizer,
            min_len=min_len,
            max_len=int(max_len),
            sampling=(sampled == "sampled"),
            n_beams=n_beams,
            top_p=top_p,
            temp=temp,
        )
        st.markdown("### The model generated answer is:")
        st.write(answer)
    if action in [0, 1, 3] and wiki_source != "none":
        st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
        for i, res in enumerate(support_list):
            wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
            sec_titles = res[1].strip()
            if sec_titles == "":
                sections = "[{}]({})".format(res[0], wiki_url)
            else:
                sec_list = sec_titles.split(" & ")
                sections = " & ".join(
                    ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
                )
            st.markdown(
                "{0:02d} - **Article**: {1:<18} <br>  _Section_: {2}".format(i + 1, res[0], sections),
                unsafe_allow_html=True,
            )
            if show_passages:
                st.write(
                    '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True
                )
    if action in [2, 3]:
        nn_train_list = find_nearest_training(question)
        train_exple = nn_train_list[0]
        st.markdown(
            "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
        )
        answers_st = [
            "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
            for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
            if i == 0 or sc > 2
        ]
        st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))

disclaimer = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
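# To try the demo locally (script name illustrative, and the memmapped index files and
# checkpoints referenced above must already exist):
#   streamlit run eli5_app.py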
| 301
| 0
|
'''simple docstring'''
import random
def _partition(data: list, pivot) -> tuple:
    """Three-way partition of ``data`` around ``pivot``."""
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    # index = len(items) // 2 when trying to find the median
    # (value of index when items is sorted)

    # invalid input
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
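# Example: quick_select returns the element that would sit at `index` in the sorted list,
# in expected O(n) time versus O(n log n) for sorting first.
#   quick_select([2, 4, 5, 7, 899, 54, 32], 5)  # -> 54
#   quick_select([5, 4, 3, 2], 2)               # -> 4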
| 23
|
"""simple docstring"""
encode_dict = {
'''a''': '''AAAAA''',
'''b''': '''AAAAB''',
'''c''': '''AAABA''',
'''d''': '''AAABB''',
'''e''': '''AABAA''',
'''f''': '''AABAB''',
'''g''': '''AABBA''',
'''h''': '''AABBB''',
'''i''': '''ABAAA''',
'''j''': '''BBBAA''',
'''k''': '''ABAAB''',
'''l''': '''ABABA''',
'''m''': '''ABABB''',
'''n''': '''ABBAA''',
'''o''': '''ABBAB''',
'''p''': '''ABBBA''',
'''q''': '''ABBBB''',
'''r''': '''BAAAA''',
'''s''': '''BAAAB''',
'''t''': '''BAABA''',
'''u''': '''BAABB''',
'''v''': '''BBBAB''',
'''w''': '''BABAA''',
'''x''': '''BABAB''',
'''y''': '''BABBA''',
'''z''': '''BABBB''',
''' ''': ''' ''',
}
decode_dict = {value: key for key, value in encode_dict.items()}


def encode(word: str) -> str:
    """Encode a word with the Baconian cipher defined by ``encode_dict``."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decode a Baconian-cipher string back to plain text."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
    from doctest import testmod

    testmod()
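# Round-trip example: each letter maps to a fixed five-character A/B group, so
# decode(encode(text)) recovers the original text.
#   encode("hi")          # -> "AABBBABAAA"
#   decode("AABBBABAAA")  # -> "hi"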
| 105
| 0
|
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=True,
        help="Path to the folder to store the PyTorch model or dataset/vocab.",
    )
    parser.add_argument(
        "--tf_checkpoint_path",
        default="",
        type=str,
        help="An optional path to a TensorFlow checkpoint path to be converted.",
    )
    parser.add_argument(
        "--transfo_xl_config_file",
        default="",
        type=str,
        help=(
            "An optional config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--transfo_xl_dataset_file",
        default="",
        type=str,
        help="An optional dataset file to be converted in a vocabulary.",
    )
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(
        args.tf_checkpoint_path,
        args.transfo_xl_config_file,
        args.pytorch_dump_folder_path,
        args.transfo_xl_dataset_file,
    )
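# Illustrative invocation (paths are placeholders):
#   python convert_transfo_xl_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./tf_ckpt/model.ckpt \
#       --transfo_xl_config_file ./config.json \
#       --pytorch_dump_folder_path ./transfo-xl-pytorch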
| 205
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
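# The original checkpoint stores keys and values as one fused projection of shape
# (2 * hidden_size, hidden_size): the first hidden_size rows become the key projection and
# the remaining rows the value projection, which is what the slicing above implements.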
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
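# Illustrative invocation (the checkpoint path is a placeholder):
#   python convert_glpn_to_pytorch.py --checkpoint_path ./glpn_kitti.pth \
#       --pytorch_dump_folder_path ./glpn-kitti --model_name glpn-kitti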
| 205
| 1
|
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]
def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """
    Solve the linear system ``matrix @ x = vector`` by Gaussian elimination
    with partial pivoting.
    """
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # copy the input into an augmented matrix [matrix | vector]
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[pivot_row], augmented[row] = augmented[row], augmented[pivot_row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """
    Given values y_1, y_2, ... at x = 1, 2, ..., return the polynomial of
    minimal degree passing through all of them.
    """
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) = 1 - n + n^2 - ... + n^10 from Project Euler 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """
    Sum the first incorrect terms (FITs) of the optimum polynomials OP(k, n)
    fitted to the first k terms, for k = 1..order.
    """
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)

    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
| 326
|
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    r"""Constructs a FLAVA processor which wraps an image processor and a BERT tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )

        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
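# Minimal usage sketch ("facebook/flava-full" is assumed to host a full processor config
# on the Hub):
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   inputs = processor(images=image, text=["a photo of a cat"], return_tensors="pt")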
| 326
| 1
|
'''simple docstring'''
def min_path_sum(grid: list) -> int:
    """
    Find the path from top left to bottom right of an array of numbers with the
    lowest possible sum, moving only right or down.

    >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
    7
    """
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
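# Worked example for the doctest above: prefix sums turn the first row into [1, 4, 5];
# fill_row then produces [2, 7, 6] and [6, 8, 7], so the cheapest path 1 -> 3 -> 1 -> 1 -> 1
# costs 7.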
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 114
|
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = "\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n"

_DESCRIPTION = "\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n"

_KWARGS_DESCRIPTION = "\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'stsb')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {'pearson': 1.0, 'spearmanr': 1.0}\n\n >>> glue_metric = datasets.load_metric('glue', 'cola')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
| 1
|
"""simple docstring"""
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
lowercase_ = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
lowercase_ = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
lowercase_ = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
lowercase_ = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
lowercase_ = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
lowercase_ = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
lowercase_ = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
lowercase_ = []
lowercase_ = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
lowercase_ = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
lowercase_ = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
lowercase_ = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def lowercase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Any ) -> Tuple:
for attribute in key.split('''.''' ):
__a = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if weight_type is not None:
__a = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
else:
__a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
__a = value
elif weight_type == "weight_g":
__a = value
elif weight_type == "weight_v":
__a = value
elif weight_type == "bias":
__a = value
elif weight_type == "running_mean":
__a = value
elif weight_type == "running_var":
__a = value
elif weight_type == "num_batches_tracked":
__a = value
else:
__a = value
logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any ) -> Optional[Any]:
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__a , __a = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
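# Illustration (hypothetical keys): with ignore_keys = ["encoder.proj", "text_decoder_prenet.*"],
# the predicate above matches "text_decoder_prenet.embed_positions" (trailing ".*" wildcard)
# and "encoder.proj" (plain substring), but not "encoder.layers.0.fc1".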
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] ) -> int:
__a = []
if task == "s2t":
__a = hf_model.speechta.encoder.prenet.feature_encoder
__a = MAPPING_S2T
__a = IGNORE_KEYS_S2T
elif task == "t2s":
__a = None
__a = MAPPING_T2S
__a = IGNORE_KEYS_T2S
elif task == "s2s":
__a = hf_model.speechta.encoder.prenet.feature_encoder
__a = MAPPING_S2S
__a = IGNORE_KEYS_S2S
else:
raise ValueError(f'''Unsupported task: {task}''' )
for name, value in fairseq_dict.items():
if should_ignore(lowerCAmelCase__ , lowerCAmelCase__ ):
logger.info(f'''{name} was ignored''' )
continue
__a = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , )
__a = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
__a , __a = key.split('''.*.''' )
if prefix in name and suffix in name:
__a = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
__a = True
if "*" in mapped_key:
__a = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2]
__a = mapped_key.replace('''*''' , lowerCAmelCase__ )
if "weight_g" in name:
__a = '''weight_g'''
elif "weight_v" in name:
__a = '''weight_v'''
elif "bias" in name:
__a = '''bias'''
elif "weight" in name:
__a = '''weight'''
elif "running_mean" in name:
__a = '''running_mean'''
elif "running_var" in name:
__a = '''running_var'''
elif "num_batches_tracked" in name:
__a = '''num_batches_tracked'''
else:
__a = None
set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
continue
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] ) -> Optional[int]:
__a = full_name.split('''conv_layers.''' )[-1]
__a = name.split('''.''' )
__a = int(items[0] )
__a = int(items[1] )
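    # e.g. a fairseq key such as "...conv_layers.0.0.weight" (hypothetical) yields
    # layer_id=0 and type_id=0 (the convolution itself); type_id == 2 addresses the
    # layer/group norm that follows the convolution.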
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
__a = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
__a = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
__a = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
__a = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCAmelCase__ )
@torch.no_grad()
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : List[str]=None , ) -> int:
if config_path is not None:
__a = SpeechTaConfig.from_pretrained(lowerCAmelCase__ )
else:
__a = SpeechTaConfig()
if task == "s2t":
__a = config.max_text_positions
__a = SpeechTaForSpeechToText(lowerCAmelCase__ )
elif task == "t2s":
__a = 1876
__a = 600
__a = config.max_speech_positions
__a = SpeechTaForTextToSpeech(lowerCAmelCase__ )
elif task == "s2s":
__a = 1876
__a = config.max_speech_positions
__a = SpeechTaForSpeechToSpeech(lowerCAmelCase__ )
else:
raise ValueError(f'''Unknown task name: {task}''' )
if vocab_path:
__a = SpeechTaTokenizer(lowerCAmelCase__ , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
__a = AddedToken('''<mask>''' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )
__a = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
__a = SpeechTaFeatureExtractor()
__a = SpeechTaProcessor(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
__a = torch.load(lowerCAmelCase__ )
recursively_load_weights(fairseq_checkpoint['''model'''] , lowerCAmelCase__ , lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
if repo_id:
print('''Pushing to the hub...''' )
processor.push_to_hub(lowerCAmelCase__ )
model.push_to_hub(lowerCAmelCase__ )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
lowercase_ = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
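# Example invocation (hypothetical script name and paths):
#   python convert_speecht5_checkpoint.py --task t2s --checkpoint_path ./speecht5_tts.pt \
#     --vocab_path ./spm_char.model --pytorch_dump_folder_path ./speecht5_tts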
| 45
|
"""simple docstring"""
def lowercase ( separator : str , separated : list[str] ) -> str:
    __a = ''''''
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase , str ):
            raise Exception('''join() accepts only strings to be joined''' )
        __a += word_or_phrase + separator
    return __a.strip(separator )
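# Quick check: lowercase("-", ["a", "b", "c"]) returns "a-b-c"
# (the final strip removes the trailing separator).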
if __name__ == "__main__":
from doctest import testmod
testmod()
| 45
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Tuple =logging.get_logger(__name__)
lowerCamelCase : Tuple ={
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class __a ( A__ ):
_lowerCAmelCase : str = '''roc_bert'''
def __init__( self : List[str] , SCREAMING_SNAKE_CASE : List[str]=3_05_22 , SCREAMING_SNAKE_CASE : Tuple=7_68 , SCREAMING_SNAKE_CASE : List[str]=12 , SCREAMING_SNAKE_CASE : Optional[Any]=12 , SCREAMING_SNAKE_CASE : Tuple=30_72 , SCREAMING_SNAKE_CASE : List[Any]="gelu" , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : List[str]=0.1 , SCREAMING_SNAKE_CASE : Dict=5_12 , SCREAMING_SNAKE_CASE : List[Any]=2 , SCREAMING_SNAKE_CASE : int=0.0_2 , SCREAMING_SNAKE_CASE : Union[str, Any]=1e-1_2 , SCREAMING_SNAKE_CASE : Optional[Any]=True , SCREAMING_SNAKE_CASE : List[Any]=0 , SCREAMING_SNAKE_CASE : Tuple="absolute" , SCREAMING_SNAKE_CASE : Optional[int]=None , SCREAMING_SNAKE_CASE : List[Any]=True , SCREAMING_SNAKE_CASE : Any=True , SCREAMING_SNAKE_CASE : Optional[int]=7_68 , SCREAMING_SNAKE_CASE : Dict=9_10 , SCREAMING_SNAKE_CASE : int=5_12 , SCREAMING_SNAKE_CASE : List[Any]=2_48_58 , SCREAMING_SNAKE_CASE : List[str]=True , **SCREAMING_SNAKE_CASE : str , ):
'''simple docstring'''
UpperCamelCase__ : Dict = vocab_size
UpperCamelCase__ : Any = max_position_embeddings
UpperCamelCase__ : List[str] = hidden_size
UpperCamelCase__ : str = num_hidden_layers
UpperCamelCase__ : Optional[Any] = num_attention_heads
UpperCamelCase__ : Dict = intermediate_size
UpperCamelCase__ : int = hidden_act
UpperCamelCase__ : List[str] = hidden_dropout_prob
UpperCamelCase__ : Union[str, Any] = attention_probs_dropout_prob
UpperCamelCase__ : int = initializer_range
UpperCamelCase__ : Union[str, Any] = type_vocab_size
UpperCamelCase__ : int = layer_norm_eps
UpperCamelCase__ : Any = use_cache
UpperCamelCase__ : List[str] = enable_pronunciation
UpperCamelCase__ : Optional[int] = enable_shape
UpperCamelCase__ : Union[str, Any] = pronunciation_embed_dim
UpperCamelCase__ : Tuple = pronunciation_vocab_size
UpperCamelCase__ : str = shape_embed_dim
UpperCamelCase__ : Any = shape_vocab_size
UpperCamelCase__ : List[Any] = concat_input
UpperCamelCase__ : Union[str, Any] = position_embedding_type
UpperCamelCase__ : str = classifier_dropout
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
| 196
|
def odd_even_sort( input_list : list ) -> list:
    is_sorted = False
    while is_sorted is False:  # keep looping until a full pass makes no swaps
        is_sorted = True
        for i in range(0 , len(input_list ) - 1 , 2 ):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1 , len(input_list ) - 1 , 2 ):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
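# Example: odd_even_sort([5, 1, 4, 2, 3]) returns [1, 2, 3, 4, 5]
# (brick sort: alternating compare-and-swap passes over even- and odd-indexed pairs).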
if __name__ == "__main__":
print('''Enter list to be sorted''')
    input_list = [int(x) for x in input().split()]
    # inputting the elements of the list on one line
    sorted_list = odd_even_sort(input_list)
    print('''The sorted list is''')
    print(sorted_list)
| 196
| 1
|
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCAmelCase ( lowerCamelCase__ ):
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_snake_case , """embed_dim""" ) )
self.parent.assertTrue(hasattr(_snake_case , """num_heads""" ) )
class __lowerCAmelCase :
def __init__( self , _snake_case , _snake_case=13 , _snake_case=64 , _snake_case=3 , _snake_case=[16, 48, 96] , _snake_case=[1, 3, 6] , _snake_case=[1, 2, 10] , _snake_case=[7, 3, 3] , _snake_case=[4, 2, 2] , _snake_case=[2, 1, 1] , _snake_case=[2, 2, 2] , _snake_case=[False, False, True] , _snake_case=[0.0, 0.0, 0.0] , _snake_case=0.02 , _snake_case=1e-12 , _snake_case=True , _snake_case=True , _snake_case=2 , ):
"""simple docstring"""
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = image_size
_lowerCAmelCase = patch_sizes
_lowerCAmelCase = patch_stride
_lowerCAmelCase = patch_padding
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = num_labels
_lowerCAmelCase = num_channels
_lowerCAmelCase = embed_dim
_lowerCAmelCase = num_heads
_lowerCAmelCase = stride_kv
_lowerCAmelCase = depth
_lowerCAmelCase = cls_token
_lowerCAmelCase = attention_drop_rate
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase = None
if self.use_labels:
# create a random int32 tensor of given shape
_lowerCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def snake_case ( self ):
"""simple docstring"""
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def snake_case ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = TFCvtModel(config=_snake_case )
_lowerCAmelCase = model(_snake_case , training=_snake_case )
_lowerCAmelCase = (self.image_size, self.image_size)
_lowerCAmelCase , _lowerCAmelCase = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
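            # track the spatial size after each stage's patch embedding with the standard
            # conv output-size formula: floor((in + 2 * pad - kernel) / stride) + 1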
_lowerCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_lowerCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def snake_case ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
_lowerCAmelCase = self.num_labels
_lowerCAmelCase = TFCvtForImageClassification(_snake_case )
_lowerCAmelCase = model(_snake_case , labels=_snake_case , training=_snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = config_and_inputs
_lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
__lowerCamelCase = (
{'''feature-extraction''': TFCvtModel, '''image-classification''': TFCvtForImageClassification}
if is_tf_available()
else {}
)
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
__lowerCamelCase = False
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = TFCvtModelTester(self )
_lowerCAmelCase = TFCvtConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def snake_case ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def snake_case ( self ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def snake_case ( self ):
"""simple docstring"""
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def snake_case ( self ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(_snake_case )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = model_class(_snake_case )
_lowerCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase = [*signature.parameters.keys()]
_lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _snake_case )
def snake_case ( self ):
"""simple docstring"""
def check_hidden_states_output(_snake_case , _snake_case , _snake_case ):
_lowerCAmelCase = model_class(_snake_case )
_lowerCAmelCase = model(**self._prepare_for_class(_snake_case , _snake_case ) )
_lowerCAmelCase = outputs.hidden_states
_lowerCAmelCase = len(self.model_tester.depth )
self.assertEqual(len(_snake_case ) , _snake_case )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCAmelCase = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def snake_case ( self ):
"""simple docstring"""
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase = TFCvtModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def _UpperCAmelCase ( ):
"""simple docstring"""
_lowerCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def snake_case ( self ):
"""simple docstring"""
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def snake_case ( self ):
"""simple docstring"""
_lowerCAmelCase = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_lowerCAmelCase = self.default_image_processor
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=_snake_case , return_tensors="""tf""" )
# forward pass
_lowerCAmelCase = model(**_snake_case )
# verify the logits
_lowerCAmelCase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _snake_case )
_lowerCAmelCase = tf.constant([0.9285, 0.9015, -0.3150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _snake_case , atol=1e-4 ) )
| 82
|
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
SCREAMING_SNAKE_CASE_ = getLogger(__name__)
SCREAMING_SNAKE_CASE_ = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def lowercase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = 8 , _lowerCAmelCase = DEFAULT_DEVICE , _lowerCAmelCase=False , _lowerCAmelCase="summarization" , _lowerCAmelCase=None , **_lowerCAmelCase , ):
__lowerCAmelCase = Path(_lowerCAmelCase ).open("""w""" , encoding="""utf-8""" )
__lowerCAmelCase = str(_lowerCAmelCase )
__lowerCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(_lowerCAmelCase ).to(_lowerCAmelCase )
if fpaa:
__lowerCAmelCase = model.half()
__lowerCAmelCase = AutoTokenizer.from_pretrained(_lowerCAmelCase )
logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
__lowerCAmelCase = time.time()
# update config with task specific params
use_task_specific_params(_lowerCAmelCase , _lowerCAmelCase )
if prefix is None:
__lowerCAmelCase = prefix or getattr(model.config , """prefix""" , """""" ) or """"""
for examples_chunk in tqdm(list(chunks(_lowerCAmelCase , _lowerCAmelCase ) ) ):
__lowerCAmelCase = [prefix + text for text in examples_chunk]
__lowerCAmelCase = tokenizer(_lowerCAmelCase , return_tensors="""pt""" , truncation=_lowerCAmelCase , padding="""longest""" ).to(_lowerCAmelCase )
__lowerCAmelCase = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_lowerCAmelCase , )
__lowerCAmelCase = tokenizer.batch_decode(_lowerCAmelCase , skip_special_tokens=_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase )
for hypothesis in dec:
fout.write(hypothesis + """\n""" )
fout.flush()
fout.close()
__lowerCAmelCase = int(time.time() - start_time ) # seconds
__lowerCAmelCase = len(_lowerCAmelCase )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def lowercase ():
return datetime.datetime.now().strftime("""%Y-%m-%d %H:%M:%S""" )
def lowercase (_lowerCAmelCase=True ):
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""model_name""" , type=_lowerCAmelCase , help="""like facebook/bart-large-cnn,t5-base, etc.""" )
parser.add_argument("""input_path""" , type=_lowerCAmelCase , help="""like cnn_dm/test.source""" )
parser.add_argument("""save_path""" , type=_lowerCAmelCase , help="""where to save summaries""" )
parser.add_argument("""--reference_path""" , type=_lowerCAmelCase , required=_lowerCAmelCase , help="""like cnn_dm/test.target""" )
parser.add_argument("""--score_path""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default="""metrics.json""" , help="""where to save metrics""" )
parser.add_argument("""--device""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default=_lowerCAmelCase , help="""cuda, cuda:1, cpu etc.""" )
parser.add_argument(
"""--prefix""" , type=_lowerCAmelCase , required=_lowerCAmelCase , default=_lowerCAmelCase , help="""will be added to the begininng of src examples""" )
parser.add_argument("""--task""" , type=_lowerCAmelCase , default="""summarization""" , help="""used for task_specific_params + metrics""" )
parser.add_argument("""--bs""" , type=_lowerCAmelCase , default=8 , required=_lowerCAmelCase , help="""batch size""" )
parser.add_argument(
"""--n_obs""" , type=_lowerCAmelCase , default=-1 , required=_lowerCAmelCase , help="""How many observations. Defaults to all.""" )
parser.add_argument("""--fp16""" , action="""store_true""" )
parser.add_argument("""--dump-args""" , action="""store_true""" , help="""print the custom hparams with the results""" )
parser.add_argument(
"""--info""" , nargs="""?""" , type=_lowerCAmelCase , const=datetime_now() , help=(
"""use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."""
""" lang=en-ru. If no value is passed, the current datetime string will be used."""
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
__lowerCAmelCase , __lowerCAmelCase = parser.parse_known_args()
__lowerCAmelCase = parse_numeric_n_bool_cl_kwargs(_lowerCAmelCase )
if parsed_args and verbose:
print(f"""parsed the following generate kwargs: {parsed_args}""" )
__lowerCAmelCase = [""" """ + x.rstrip() if """t5""" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
__lowerCAmelCase = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=_lowerCAmelCase )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError("""Can't mix --fp16 and --device cpu""" )
__lowerCAmelCase = generate_summaries_or_translations(
_lowerCAmelCase , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **_lowerCAmelCase , )
if args.reference_path is None:
return {}
# Compute scores
__lowerCAmelCase = calculate_bleu if """translation""" in args.task else calculate_rouge
__lowerCAmelCase = [x.rstrip() for x in open(args.save_path ).readlines()]
__lowerCAmelCase = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_lowerCAmelCase )]
__lowerCAmelCase = score_fn(_lowerCAmelCase , _lowerCAmelCase )
scores.update(_lowerCAmelCase )
if args.dump_args:
scores.update(_lowerCAmelCase )
if args.info:
__lowerCAmelCase = args.info
if verbose:
print(_lowerCAmelCase )
if args.score_path is not None:
json.dump(_lowerCAmelCase , open(args.score_path , """w""" ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 301
| 0
|
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( __A : List[str] , __A : str , __A : Tuple , __A : Any , __A : Optional[int] ) -> List[str]:
"""simple docstring"""
with open(_a ) as metadata_file:
a_ : List[str] = json.load(_a )
a_ : str = LukeConfig(use_entity_aware_attention=_a , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
a_ : Any = torch.load(_a , map_location='cpu' )["module"]
# Load the entity vocab file
a_ : List[Any] = load_original_entity_vocab(_a )
# add an entry for [MASK2]
a_ : Optional[Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
a_ : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
a_ : List[Any] = AddedToken('<ent>' , lstrip=_a , rstrip=_a )
a_ : str = AddedToken('<ent2>' , lstrip=_a , rstrip=_a )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(_a )
with open(os.path.join(_a , 'tokenizer_config.json' ) , 'r' ) as f:
a_ : str = json.load(_a )
a_ : Any = "MLukeTokenizer"
with open(os.path.join(_a , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(_a , _a )
with open(os.path.join(_a , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(_a , _a )
a_ : str = MLukeTokenizer.from_pretrained(_a )
# Initialize the embeddings of the special tokens
a_ : Dict = tokenizer.convert_tokens_to_ids(['@'] )[0]
a_ : str = tokenizer.convert_tokens_to_ids(['#'] )[0]
a_ : Union[str, Any] = state_dict["embeddings.word_embeddings.weight"]
a_ : Any = word_emb[ent_init_index].unsqueeze(0 )
a_ : List[Any] = word_emb[enta_init_index].unsqueeze(0 )
a_ : Dict = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
a_ : Optional[int] = state_dict[bias_name]
a_ : List[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
a_ : List[Any] = decoder_bias[enta_init_index].unsqueeze(0 )
a_ : Tuple = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
a_ : int = F"""encoder.layer.{layer_index}.attention.self."""
a_ : Optional[int] = state_dict[prefix + matrix_name]
a_ : Tuple = state_dict[prefix + matrix_name]
a_ : str = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
a_ : Optional[Any] = state_dict["entity_embeddings.entity_embeddings.weight"]
a_ : Union[str, Any] = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ : Any = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
a_ : Optional[int] = state_dict["entity_predictions.bias"]
a_ : List[str] = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
a_ : int = torch.cat([entity_prediction_bias, entity_mask_bias] )
a_ : Any = LukeForMaskedLM(config=_a ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
a_ : List[Any] = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
a_ : Union[str, Any] = state_dict[key]
else:
a_ : Dict = state_dict[key]
a_ : int = model.load_state_dict(_a , strict=_a )
if set(_a ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(_a ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
a_ : Any = MLukeTokenizer.from_pretrained(_a , task='entity_classification' )
a_ : int = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
a_ : int = (0, 9)
a_ : List[Any] = tokenizer(_a , entity_spans=[span] , return_tensors='pt' )
a_ : Optional[int] = model(**_a )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ : List[str] = torch.Size((1, 33, 7_68) )
a_ : List[str] = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , _a , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
a_ : Optional[Any] = torch.Size((1, 1, 7_68) )
a_ : Optional[int] = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , _a , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
a_ : str = MLukeTokenizer.from_pretrained(_a )
a_ : str = "Tokyo is the capital of <mask>."
a_ : Union[str, Any] = (24, 30)
a_ : int = tokenizer(_a , entity_spans=[span] , return_tensors='pt' )
a_ : Tuple = model(**_a )
a_ : Union[str, Any] = encoding["input_ids"][0].tolist()
a_ : Dict = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
a_ : Optional[Any] = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(_a )
a_ : Optional[Any] = outputs.entity_logits[0][0].argmax().item()
a_ : Optional[int] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(_a ) )
model.save_pretrained(_a )
def SCREAMING_SNAKE_CASE_ ( __A : Any ) -> Any:
"""simple docstring"""
a_ : Dict = ["[MASK]", "[PAD]", "[UNK]"]
a_ : Tuple = [json.loads(_a ) for line in open(_a )]
a_ : str = {}
for entry in data:
a_ : Optional[Any] = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
a_ : int = entity_id
break
a_ : List[str] = F"""{language}:{entity_name}"""
a_ : Optional[Any] = entity_id
return new_mapping
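# The entity vocab file is read as JSON Lines; a record might look like (hypothetical):
#   {"id": 7, "entities": [["Japan", "en"], ["日本", "ja"]]}
# yielding entries "en:Japan" and "ja:日本" -> 7, while special tokens keep their bare name.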
if __name__ == "__main__":
UpperCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
UpperCAmelCase_ : Dict = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 364
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
a_ : List[str] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
a_ : Union[str, Any] = get_activation('gelu' )
self.assertTrue(torch.allclose(gelu_python(SCREAMING_SNAKE_CASE__ ) , torch_builtin(SCREAMING_SNAKE_CASE__ ) ) )
self.assertFalse(torch.allclose(gelu_python(SCREAMING_SNAKE_CASE__ ) , gelu_new(SCREAMING_SNAKE_CASE__ ) ) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
a_ : List[str] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
a_ : Union[str, Any] = get_activation('gelu' )
a_ : str = get_activation('gelu_10' )
a_ : Tuple = torch_builtin(SCREAMING_SNAKE_CASE__ )
a_ : str = geluaa(SCREAMING_SNAKE_CASE__ )
a_ : List[Any] = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(SCREAMING_SNAKE_CASE__ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
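        # gelu_10 clips the activation at 10.0, so wherever the unclipped output stays
        # below the threshold it must agree with the builtin gelu -- exactly what the
        # masked comparison above verifies.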
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
get_activation('gelu' )
get_activation('gelu_10' )
get_activation('gelu_fast' )
get_activation('gelu_new' )
get_activation('gelu_python' )
get_activation('gelu_pytorch_tanh' )
get_activation('linear' )
get_activation('mish' )
get_activation('quick_gelu' )
get_activation('relu' )
get_activation('sigmoid' )
get_activation('silu' )
get_activation('swish' )
get_activation('tanh' )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
get_activation('bogus' )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
get_activation(SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE ( self : str ) -> str:
a_ : Any = get_activation('gelu' )
a_ : Any = 1
a_ : int = get_activation('gelu' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(SCREAMING_SNAKE_CASE__ ):
a_ : Tuple = acta.a
| 120
| 0
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def a ( A__ : List[str] , A__ : int ) -> Optional[int]:
"""simple docstring"""
_lowercase =old_name
if "patch_embed" in old_name:
_lowercase , _lowercase , _lowercase =old_name.split('.' )
if layer == "0":
_lowercase =old_name.replace('0' , 'convolution1' )
elif layer == "1":
_lowercase =old_name.replace('1' , 'batchnorm_before' )
elif layer == "3":
_lowercase =old_name.replace('3' , 'convolution2' )
else:
_lowercase =old_name.replace('4' , 'batchnorm_after' )
if "network" in old_name and re.search(r'\d\.\d' , A__ ):
_lowercase =r'\b\d{2}\b'
if bool(re.search(A__ , A__ ) ):
_lowercase =re.search(r'\d\.\d\d.' , A__ ).group()
else:
_lowercase =re.search(r'\d\.\d.' , A__ ).group()
if int(match[0] ) < 6:
_lowercase =old_name.replace(A__ , '' )
_lowercase =trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] )
_lowercase ='intermediate_stages.' + trimmed_name
else:
_lowercase =old_name.replace(A__ , '' )
if int(match[2] ) < num_meta4D_last_stage:
_lowercase =trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] )
else:
_lowercase =str(int(match[2] ) - num_meta4D_last_stage )
_lowercase =trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index )
if "norm1" in old_name:
_lowercase =trimmed_name.replace('norm1' , 'layernorm1' )
elif "norm2" in old_name:
_lowercase =trimmed_name.replace('norm2' , 'layernorm2' )
elif "fc1" in old_name:
_lowercase =trimmed_name.replace('fc1' , 'linear_in' )
elif "fc2" in old_name:
_lowercase =trimmed_name.replace('fc2' , 'linear_out' )
_lowercase ='last_stage.' + trimmed_name
elif "network" in old_name and re.search(r'.\d.' , A__ ):
_lowercase =old_name.replace('network' , 'intermediate_stages' )
if "fc" in new_name:
_lowercase =new_name.replace('fc' , 'convolution' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
_lowercase =new_name.replace('norm1' , 'batchnorm_before' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
_lowercase =new_name.replace('norm2' , 'batchnorm_after' )
if "proj" in new_name:
_lowercase =new_name.replace('proj' , 'projection' )
if "dist_head" in new_name:
_lowercase =new_name.replace('dist_head' , 'distillation_classifier' )
elif "head" in new_name:
_lowercase =new_name.replace('head' , 'classifier' )
elif "patch_embed" in new_name:
_lowercase ='efficientformer.' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
_lowercase =new_name.replace('norm' , 'layernorm' )
_lowercase ='efficientformer.' + new_name
else:
_lowercase ='efficientformer.encoder.' + new_name
return new_name
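# Sketch of the renaming (hypothetical keys): timm-style "patch_embed.0.*" becomes
# "efficientformer.patch_embed.convolution1.*", "network.*" blocks are routed to
# "intermediate_stages.*" or "last_stage.*", and "head"/"dist_head" map to
# "classifier"/"distillation_classifier".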
def a ( A__ : Optional[int] , A__ : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
for key in checkpoint.copy().keys():
_lowercase =checkpoint.pop(A__ )
_lowercase =val
return checkpoint
def a ( ) -> Union[str, Any]:
"""simple docstring"""
_lowercase ='http://images.cocodataset.org/val2017/000000039769.jpg'
_lowercase =Image.open(requests.get(A__ , stream=A__ ).raw )
return image
def a ( A__ : Path , A__ : Path , A__ : Path , A__ : bool ) -> Union[str, Any]:
"""simple docstring"""
_lowercase =torch.load(A__ , map_location='cpu' )['model']
_lowercase =EfficientFormerConfig.from_json_file(A__ )
_lowercase =EfficientFormerForImageClassificationWithTeacher(A__ )
_lowercase ='_'.join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] )
_lowercase =config.depths[-1] - config.num_metaad_blocks + 1
_lowercase =convert_torch_checkpoint(A__ , A__ )
model.load_state_dict(A__ )
model.eval()
_lowercase ={
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
# prepare image
_lowercase =prepare_img()
_lowercase =256
_lowercase =224
_lowercase =EfficientFormerImageProcessor(
size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , )
_lowercase =processor(images=A__ , return_tensors='pt' ).pixel_values
# original processing pipeline
_lowercase =Compose(
[
Resize(A__ , interpolation=pillow_resamplings['bicubic'] ),
CenterCrop(A__ ),
ToTensor(),
Normalize(A__ , A__ ),
] )
_lowercase =image_transforms(A__ ).unsqueeze(0 )
assert torch.allclose(A__ , A__ )
_lowercase =model(A__ )
_lowercase =outputs.logits
_lowercase =(1, 1000)
if "l1" in model_name:
_lowercase =torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , A__ , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
_lowercase =torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , A__ , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
_lowercase =torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
F'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
# Save Checkpoints
Path(A__ ).mkdir(exist_ok=A__ )
model.save_pretrained(A__ )
    print(F'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(A__ )
    print(F'''Processor successfully saved at {pytorch_dump_path}''' )
if push_to_hub:
print('Pushing model to the hub...' )
model.push_to_hub(
repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message='Add model' , use_temp_dir=A__ , )
processor.push_to_hub(
repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message='Add image processor' , use_temp_dir=A__ , )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
lowercase_ = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 205
|
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
lowercase_ = data_utils.TransfoXLTokenizer
lowercase_ = data_utils.TransfoXLCorpus
lowercase_ = data_utils
lowercase_ = data_utils
def a ( A__ : int , A__ : Dict , A__ : Union[str, Any] , A__ : Union[str, Any] ) -> List[str]:
"""simple docstring"""
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(A__ , 'rb' ) as fp:
_lowercase =pickle.load(A__ , encoding='latin1' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
_lowercase =pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(F'''Save vocabulary to {pytorch_vocab_dump_path}''' )
_lowercase =corpus.vocab.__dict__
torch.save(A__ , A__ )
_lowercase =corpus.__dict__
corpus_dict_no_vocab.pop('vocab' , A__ )
_lowercase =pytorch_dump_folder_path + '/' + CORPUS_NAME
print(F'''Save dataset to {pytorch_dataset_dump_path}''' )
torch.save(A__ , A__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
_lowercase =os.path.abspath(A__ )
_lowercase =os.path.abspath(A__ )
print(F'''Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.''' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
_lowercase =TransfoXLConfig()
else:
_lowercase =TransfoXLConfig.from_json_file(A__ )
print(F'''Building PyTorch model from configuration: {config}''' )
_lowercase =TransfoXLLMHeadModel(A__ )
_lowercase =load_tf_weights_in_transfo_xl(A__ , A__ , A__ )
# Save pytorch-model
_lowercase =os.path.join(A__ , A__ )
_lowercase =os.path.join(A__ , A__ )
print(F'''Save PyTorch model to {os.path.abspath(A__ )}''' )
torch.save(model.state_dict() , A__ )
print(F'''Save configuration file to {os.path.abspath(A__ )}''' )
with open(A__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
lowercase_ = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
| 205
| 1
|
def UpperCamelCase_( _snake_case : int = 50 ):
"""simple docstring"""
__a =[1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
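# Sanity check for the recurrence (tiles of length 2-4 plus unit squares, as in
# Project Euler 117): solution(5) == 15.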
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
|
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
_lowerCAmelCase : List[Any] = logging.getLogger(__name__)
_lowerCAmelCase : Optional[Any] = "Hello world! cécé herlolip"
_lowerCAmelCase : str = namedtuple(
"BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def UpperCamelCase_( _snake_case : str , _snake_case : List[Any] ):
"""simple docstring"""
__a =BertAbsConfig(
temp_dir='.' , finetune_bert=_snake_case , large=_snake_case , share_emb=_snake_case , use_bert_emb=_snake_case , encoder='bert' , max_pos=512 , enc_layers=6 , enc_hidden_size=512 , enc_heads=8 , enc_ff_size=512 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=768 , dec_heads=8 , dec_ff_size=2048 , dec_dropout=0.2 , )
__a =torch.load(_snake_case , lambda _snake_case , _snake_case : storage )
__a =AbsSummarizer(_snake_case , torch.device('cpu' ) , _snake_case )
original.eval()
__a =BertAbsSummarizer(_snake_case , torch.device('cpu' ) )
new_model.eval()
# -------------------
# Convert the weights
# -------------------
logging.info('convert the model' )
new_model.bert.load_state_dict(original.bert.state_dict() )
new_model.decoder.load_state_dict(original.decoder.state_dict() )
new_model.generator.load_state_dict(original.generator.state_dict() )
# ----------------------------------
    # Make sure the outputs are identical
# ----------------------------------
logging.info('Make sure that the models\' outputs are identical' )
__a =BertTokenizer.from_pretrained('bert-base-uncased' )
# prepare the model inputs
__a =tokenizer.encode('This is sample éàalj\'-.' )
encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_snake_case )) )
__a =torch.tensor(_snake_case ).unsqueeze(0 )
__a =tokenizer.encode('This is sample 3 éàalj\'-.' )
decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(_snake_case )) )
__a =torch.tensor(_snake_case ).unsqueeze(0 )
# failsafe to make sure the weights reset does not affect the
# loaded weights.
assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0
# forward pass
__a =encoder_input_ids
__a =decoder_input_ids
__a =__a =None
__a =None
__a =__a =None
__a =__a =None
__a =None
    # The original model does not apply the generator layer immediately but rather in
# the beam search (where it combines softmax + linear layer). Since we already
# apply the softmax in our generation process we only apply the linear layer here.
# We make sure that the outputs of the full stack are identical
__a =original(_snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case )[0]
__a =original.generator(_snake_case )
__a =new_model(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )[0]
__a =new_model.generator(_snake_case )
__a =torch.max(torch.abs(output_converted_model - output_original_model ) ).item()
    print('Maximum absolute difference between model outputs: {:.2f}'.format(_snake_case ) )
__a =torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item()
    print('Maximum absolute difference between generator outputs: {:.2f}'.format(_snake_case ) )
__a =torch.allclose(_snake_case , _snake_case , atol=1e-3 )
if are_identical:
        logging.info('all outputs are equal up to 1e-3' )
else:
raise ValueError('the weights are different. The new model is likely different from the original one.' )
# The model has been saved with torch.save(model) and this is bound to the exact
# directory structure. We save the state_dict instead.
logging.info('saving the model\'s state dictionary' )
torch.save(
new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' )
if __name__ == "__main__":
_lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
_lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
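# Illustrative invocation (a sketch; the script file name and both paths are
# hypothetical placeholders, not shipped checkpoints):
#
#   python convert_bertabs_original_pytorch_checkpoint.py \
#       --bertabs_checkpoint_path /path/to/bertabs_cnndm.pt \
#       --pytorch_dump_folder_path ./bertabs-converted
#
# Note that, as written above, the converted state dict is saved to the hard-coded
# './bertabs-finetuned-cnndm-extractive-abstractive-summarization/' path, not to
# the --pytorch_dump_folder_path flag.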
| 308
| 1
|
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class a ( lowercase__ ):
"""simple docstring"""
a : Optional[Any] = ComputeEnvironment.AMAZON_SAGEMAKER
a : Union[str, Any] = True
a : Optional[Any] = 'ml.p3.2xlarge'
a : List[Any] = 'accelerate_sagemaker_execution_role'
a : Any = 'hf-sm'
a : Optional[int] = 'us-east-1'
a : str = 1
a : Dict = 'accelerate-sagemaker-1'
a : str = '1.6'
a : Dict = '4.4'
a : List[str] = 'train.py'
a : Optional[int] = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
a : Union[str, Any] = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self : Tuple ) -> Tuple:
        # The flag-style training script args should parse into a dict of native Python types.
__UpperCAmelCase : Any = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args["""model_name_or_path"""] , __lowercase )
assert isinstance(converted_args["""do_train"""] , __lowercase )
assert isinstance(converted_args["""epochs"""] , __lowercase )
assert isinstance(converted_args["""learning_rate"""] , __lowercase )
assert isinstance(converted_args["""max_steps"""] , __lowercase )
with pytest.raises(__lowercase ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
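# For illustration only: given the nargs above, the success case should round-trip
# into a dict of native Python types, roughly (inferred from the assertions, not
# taken from the accelerate source):
#
#   {"model_name_or_path": "bert", "do_train": False, "epochs": 3,
#    "learning_rate": 5e-5, "max_steps": 50.5}
#
# The failure case mixes bare flags ("--do_train" immediately followed by another
# flag, "--do_predict" followed by "--epochs") with valued flags, which is
# presumably why _convert_nargs_to_dict is expected to raise.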
| 114
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a : Union[str, Any] = logging.get_logger(__name__)
class a ( lowercase__ ):
"""simple docstring"""
a : Any = ['pixel_values']
def __init__( self : Optional[int] , __lowercase : bool = True , __lowercase : Dict[str, int] = None , __lowercase : int = 0.9 , __lowercase : PILImageResampling = PILImageResampling.BICUBIC , __lowercase : bool = True , __lowercase : Dict[str, int] = None , __lowercase : Union[int, float] = 1 / 255 , __lowercase : bool = True , __lowercase : bool = True , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , **__lowercase : Any , ) -> None:
super().__init__(**__lowercase )
__UpperCAmelCase : Tuple = size if size is not None else {"""shortest_edge""": 224}
__UpperCAmelCase : Union[str, Any] = get_size_dict(__lowercase , default_to_square=__lowercase )
__UpperCAmelCase : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
__UpperCAmelCase : Any = get_size_dict(__lowercase , param_name="""crop_size""" )
__UpperCAmelCase : Dict = do_resize
__UpperCAmelCase : Dict = size
__UpperCAmelCase : Tuple = crop_pct
__UpperCAmelCase : List[Any] = resample
__UpperCAmelCase : List[Any] = do_center_crop
__UpperCAmelCase : List[Any] = crop_size
__UpperCAmelCase : Any = do_rescale
__UpperCAmelCase : Tuple = rescale_factor
__UpperCAmelCase : int = do_normalize
__UpperCAmelCase : List[Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__UpperCAmelCase : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase ( self : Tuple , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : Optional[float] = None , __lowercase : PILImageResampling = PILImageResampling.BICUBIC , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Optional[int] , ) -> np.ndarray:
__UpperCAmelCase : Tuple = get_size_dict(__lowercase , default_to_square=__lowercase )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f"""size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
if crop_pct is not None:
if "shortest_edge" in size:
__UpperCAmelCase : Union[str, Any] = int(size["""shortest_edge"""] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
__UpperCAmelCase : Tuple = int(size["""height"""] / crop_pct )
else:
__UpperCAmelCase : str = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
else:
raise ValueError("""Invalid size for resize: {}""".format(__lowercase ) )
__UpperCAmelCase : str = get_resize_output_image_size(__lowercase , size=__lowercase , default_to_square=__lowercase )
else:
if "shortest_edge" in size:
__UpperCAmelCase : List[str] = get_resize_output_image_size(__lowercase , size=size["""shortest_edge"""] , default_to_square=__lowercase )
elif "height" in size and "width" in size:
__UpperCAmelCase : int = (size["""height"""], size["""width"""])
else:
raise ValueError("""Invalid size for resize: {}""".format(__lowercase ) )
return resize(__lowercase , size=__lowercase , resample=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCAmelCase ( self : Dict , __lowercase : np.ndarray , __lowercase : Dict[str, int] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : Union[str, Any] , ) -> np.ndarray:
__UpperCAmelCase : Optional[Any] = get_size_dict(__lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"""size must contain 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(__lowercase , size=(size["""height"""], size["""width"""]) , data_format=__lowercase , **__lowercase )
def UpperCAmelCase ( self : List[str] , __lowercase : np.ndarray , __lowercase : Union[int, float] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : int , ) -> int:
return rescale(__lowercase , scale=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCAmelCase ( self : List[Any] , __lowercase : np.ndarray , __lowercase : Union[float, List[float]] , __lowercase : Union[float, List[float]] , __lowercase : Optional[Union[str, ChannelDimension]] = None , **__lowercase : List[Any] , ) -> np.ndarray:
return normalize(__lowercase , mean=__lowercase , std=__lowercase , data_format=__lowercase , **__lowercase )
def UpperCAmelCase ( self : Any , __lowercase : ImageInput , __lowercase : bool = None , __lowercase : Dict[str, int] = None , __lowercase : int = None , __lowercase : PILImageResampling = None , __lowercase : bool = None , __lowercase : Dict[str, int] = None , __lowercase : bool = None , __lowercase : float = None , __lowercase : bool = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[float, List[float]]] = None , __lowercase : Optional[Union[str, TensorType]] = None , __lowercase : ChannelDimension = ChannelDimension.FIRST , **__lowercase : List[str] , ) -> PIL.Image.Image:
__UpperCAmelCase : Any = do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
__UpperCAmelCase : Optional[Any] = resample if resample is not None else self.resample
__UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
__UpperCAmelCase : Dict = do_rescale if do_rescale is not None else self.do_rescale
__UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCAmelCase : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase : Tuple = image_mean if image_mean is not None else self.image_mean
__UpperCAmelCase : Any = image_std if image_std is not None else self.image_std
__UpperCAmelCase : Optional[int] = size if size is not None else self.size
__UpperCAmelCase : Dict = get_size_dict(__lowercase , default_to_square=__lowercase )
__UpperCAmelCase : Tuple = crop_size if crop_size is not None else self.crop_size
__UpperCAmelCase : Tuple = get_size_dict(__lowercase , param_name="""crop_size""" )
__UpperCAmelCase : Dict = make_list_of_images(__lowercase )
if not valid_images(__lowercase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_pct is None:
raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
__UpperCAmelCase : str = [to_numpy_array(__lowercase ) for image in images]
if do_resize:
__UpperCAmelCase : str = [self.resize(image=__lowercase , size=__lowercase , crop_pct=__lowercase , resample=__lowercase ) for image in images]
if do_center_crop:
__UpperCAmelCase : Any = [self.center_crop(image=__lowercase , size=__lowercase ) for image in images]
if do_rescale:
__UpperCAmelCase : List[str] = [self.rescale(image=__lowercase , scale=__lowercase ) for image in images]
if do_normalize:
__UpperCAmelCase : str = [self.normalize(image=__lowercase , mean=__lowercase , std=__lowercase ) for image in images]
__UpperCAmelCase : List[str] = [to_channel_dimension_format(__lowercase , __lowercase ) for image in images]
__UpperCAmelCase : Any = {"""pixel_values""": images}
return BatchFeature(data=__lowercase , tensor_type=__lowercase )
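# Usage sketch (comments only; the name mangling above collapses every helper method
# into one name, so the class is not runnable as-is). Against the un-mangled
# original, preprocessing a single image would look roughly like this, with the
# class name as a placeholder:
#
#   processor = TheImageProcessorAbove()
#   batch = processor(images=pil_image, return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224) given the defaults in __init__
#
# The defaults resize the shortest edge to int(224 / crop_pct) and then center-crop
# back to 224x224, which is the standard crop_pct evaluation recipe.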
| 114
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = LEDTokenizer
UpperCAmelCase = LEDTokenizerFast
UpperCAmelCase = True
def _snake_case ( self ) -> Any:
super().setUp()
_UpperCAmelCase : Optional[Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
_UpperCAmelCase : List[str] = dict(zip(a_ ,range(len(a_ ) ) ) )
_UpperCAmelCase : str = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
_UpperCAmelCase : Optional[int] = {"""unk_token""": """<unk>"""}
_UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
_UpperCAmelCase : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a_ ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a_ ) )
def _snake_case ( self ,**a_ ) -> Optional[int]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname ,**a_ )
def _snake_case ( self ,**a_ ) -> Tuple:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname ,**a_ )
def _snake_case ( self ,a_ ) -> int:
return "lower newer", "lower newer"
@cached_property
def _snake_case ( self ) -> str:
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def _snake_case ( self ) -> List[Any]:
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
_UpperCAmelCase : Optional[Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : List[Any] = tokenizer(a_ ,max_length=len(a_ ) ,padding=a_ ,return_tensors="""pt""" )
self.assertIsInstance(a_ ,a_ )
self.assertEqual((2, 9) ,batch.input_ids.shape )
self.assertEqual((2, 9) ,batch.attention_mask.shape )
_UpperCAmelCase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(a_ ,a_ )
@require_torch
def _snake_case ( self ) -> Any:
_UpperCAmelCase : Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : int = tokenizer(a_ ,padding=a_ ,return_tensors="""pt""" )
self.assertIn("""input_ids""" ,a_ )
self.assertIn("""attention_mask""" ,a_ )
self.assertNotIn("""labels""" ,a_ )
self.assertNotIn("""decoder_attention_mask""" ,a_ )
@require_torch
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : Optional[int] = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : Optional[int] = tokenizer(text_target=a_ ,max_length=32 ,padding="""max_length""" ,return_tensors="""pt""" )
self.assertEqual(32 ,targets["""input_ids"""].shape[1] )
@require_torch
def _snake_case ( self ) -> Tuple:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : List[str] = tokenizer(
["""I am a small frog""" * 1_024, """I am a small frog"""] ,padding=a_ ,truncation=a_ ,return_tensors="""pt""" )
self.assertIsInstance(a_ ,a_ )
self.assertEqual(batch.input_ids.shape ,(2, 5_122) )
@require_torch
def _snake_case ( self ) -> List[Any]:
_UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization."""]
_UpperCAmelCase : str = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : Any = tokenizer(a_ ,return_tensors="""pt""" )
_UpperCAmelCase : Any = tokenizer(text_target=a_ ,return_tensors="""pt""" )
_UpperCAmelCase : List[str] = inputs["""input_ids"""]
_UpperCAmelCase : int = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def _snake_case ( self ) -> List[Any]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
_UpperCAmelCase : List[Any] = ["""Summary of the text.""", """Another summary."""]
_UpperCAmelCase : int = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
_UpperCAmelCase : Any = tokenizer(a_ ,padding=a_ )
_UpperCAmelCase : Any = [[0] * len(a_ ) for x in encoded_output["""input_ids"""]]
_UpperCAmelCase : Optional[int] = tokenizer.pad(a_ )
self.assertSequenceEqual(outputs["""global_attention_mask"""] ,a_ )
def _snake_case ( self ) -> List[str]:
pass
def _snake_case ( self ) -> Optional[int]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_UpperCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(a_ ,**a_ )
_UpperCAmelCase : str = self.tokenizer_class.from_pretrained(a_ ,**a_ )
_UpperCAmelCase : int = """A, <mask> AllenNLP sentence."""
_UpperCAmelCase : str = tokenizer_r.encode_plus(a_ ,add_special_tokens=a_ ,return_token_type_ids=a_ )
_UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(a_ ,add_special_tokens=a_ ,return_token_type_ids=a_ )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) ,sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) ,sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) ,)
_UpperCAmelCase : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
_UpperCAmelCase : Optional[int] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] ,[0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] ,[0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
a_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
a_ ,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 361
|
'''simple docstring'''
A_ : Optional[Any] = """0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
| 349
| 0
|
from __future__ import annotations
def snake_case_ ( snake_case , snake_case ) -> list[int]:
lowercase__: List[str] = 0
lowercase__: Dict = len(snake_case ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
lowercase__: Dict = i + 1
else:
lowercase__: List[Any] = j - 1
return []
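# Worked example (illustrative): for nums = [2, 7, 11, 15] and target = 9 the
# pointers move i=0, j=3 -> 2 + 15 = 17 > 9, so j becomes 2; 2 + 11 = 13 > 9, so
# j becomes 1; 2 + 7 = 9 -> return [0, 1], matching the print in __main__ below.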
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{two_pointer([2, 7, 11, 15], 9) = }''')
| 196
|
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def snake_case_ ( snake_case=32 , snake_case=10 , snake_case=1_00 , snake_case=10_26 , snake_case=True , snake_case="data/tokenized_stories_train_wikitext103.jbl" , snake_case="igf_context_pairs.jbl" , ) -> Union[str, Any]:
set_seed(3 )
# generate train_data and objective_set
lowercase__ , lowercase__: List[str] = generate_datasets(
snake_case , snake_case , number=snake_case , min_len=10_26 , trim=snake_case )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
lowercase__: Optional[Any] = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
# load pretrained model
lowercase__: str = load_gpta('gpt2' ).to(snake_case )
print('computing perplexity on objective set' )
lowercase__: int = compute_perplexity(snake_case , snake_case , snake_case ).item()
print('perplexity on objective set:' , snake_case )
    # collect igf pairs and save to file (igf_context_pairs.jbl by default)
collect_objective_set(snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def snake_case_ ( snake_case , snake_case=15 , snake_case=1_28 , snake_case=1_00 , snake_case="igf_model.pt" , ) -> Optional[Any]:
set_seed(42 )
# Load pre-trained model
lowercase__: Any = GPTaLMHeadModel.from_pretrained('gpt2' )
# Initialize secondary learner to use embedding weights of model
lowercase__: Any = SecondaryLearner(snake_case )
# Train secondary learner
lowercase__: Tuple = train_secondary_learner(
snake_case , snake_case , max_epochs=snake_case , batch_size=snake_case , eval_freq=1_00 , igf_model_path=snake_case , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def snake_case_ ( snake_case , snake_case , snake_case , snake_case=32 , snake_case=10_00 , snake_case=16 , snake_case=1.0 , snake_case=recopy_gpta , snake_case=None , snake_case=10 , snake_case="gpt2_finetuned.pt" , ) -> Tuple:
lowercase__: Dict = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
lowercase__: Optional[int] = RandomSampler(snake_case )
lowercase__: Optional[int] = DataLoader(snake_case , sampler=snake_case )
lowercase__: int = max_steps // (len(snake_case )) + 1
lowercase__: Union[str, Any] = 0
lowercase__: Optional[Any] = torch.zeros((1, context_len) , dtype=torch.long , device=snake_case )
lowercase__ , lowercase__ , lowercase__: Union[str, Any] = recopy_model(snake_case , snake_case , snake_case )
model.train()
if secondary_learner is not None:
secondary_learner.to(snake_case )
secondary_learner.eval()
lowercase__: List[Any] = []
lowercase__: str = 0
lowercase__: Tuple = []
lowercase__: Dict = []
# Compute the performance of the transformer model at the beginning
lowercase__: Optional[Any] = compute_perplexity(snake_case , snake_case , snake_case )
test_perps.append(snake_case )
print('Test perplexity, step' , snake_case , ':' , snake_case )
for epoch in range(int(snake_case ) ):
for step, example in enumerate(snake_case ):
torch.cuda.empty_cache()
lowercase__: Union[str, Any] = random.randint(0 , example.size(2 ) - context_len - 1 )
lowercase__: Dict = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
lowercase__: Union[str, Any] = model(snake_case , labels=snake_case )
lowercase__: Tuple = True
if secondary_learner is not None:
lowercase__: Optional[Any] = secondary_learner.forward(
torch.tensor(snake_case , dtype=torch.long , device=snake_case ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(snake_case ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
lowercase__: Optional[Any] = -1
if predicted_q < threshold:
lowercase__: str = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
lowercase__: List[Any] = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
lowercase__: Any = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
lowercase__: int = compute_perplexity(snake_case , snake_case , snake_case )
test_perps.append(snake_case )
print('Test perplexity, step' , snake_case , ':' , snake_case )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , snake_case )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def snake_case_ ( ) -> str:
lowercase__: Tuple = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task' )
# Required parameters
parser.add_argument(
'--data_dir' , default=snake_case , type=snake_case , required=snake_case , help='The input data dir. Should contain data files for WikiText.' , )
parser.add_argument(
'--model_name_or_path' , default=snake_case , type=snake_case , required=snake_case , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--data_file' , type=snake_case , default=snake_case , help=(
'A jbl file containing tokenized data which can be split as objective dataset, '
'train_dataset and test_dataset.'
) , )
parser.add_argument(
'--igf_data_file' , type=snake_case , default=snake_case , help='A jbl file containing the context and information gain pairs to train secondary learner.' , )
parser.add_argument(
'--output_dir' , default=snake_case , type=snake_case , required=snake_case , help='The output directory where the final fine-tuned model is stored.' , )
parser.add_argument(
'--tokenizer_name' , default=snake_case , type=snake_case , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument('--seed' , type=snake_case , default=snake_case , help='A seed for reproducible training.' )
parser.add_argument(
'--context_len' , default=32 , type=snake_case , help=(
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
) , )
parser.add_argument(
'--size_objective_set' , default=1_00 , type=snake_case , help='number of articles that are long enough to be used as our objective set' , )
parser.add_argument(
'--eval_freq' , default=1_00 , type=snake_case , help='secondary model evaluation is triggered at eval_freq' )
parser.add_argument('--max_steps' , default=10_00 , type=snake_case , help='To calculate training epochs' )
parser.add_argument(
'--secondary_learner_batch_size' , default=1_28 , type=snake_case , help='batch size of training data for secondary learner' , )
parser.add_argument(
'--batch_size' , default=16 , type=snake_case , help='batch size of training data of language model(gpt2) ' )
parser.add_argument(
        '--eval_interval' , default=10 , type=snake_case , help=(
            'compute and log the test perplexity of the fine-tuned model'
            ' every eval_interval batches'
        ) , )
parser.add_argument(
'--number' , default=1_00 , type=snake_case , help='The number of examples split to be used as objective_set/test_data' )
parser.add_argument(
'--min_len' , default=10_26 , type=snake_case , help='The minimum length of the article to be used as objective set' )
parser.add_argument(
'--secondary_learner_max_epochs' , default=15 , type=snake_case , help='number of epochs to train secondary learner' )
parser.add_argument('--trim' , default=snake_case , type=snake_case , help='truncate the example if it exceeds context length' )
parser.add_argument(
'--threshold' , default=1.0 , type=snake_case , help=(
'The threshold value used by secondary learner to filter the train_data and allow only'
' informative data as input to the model'
) , )
parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=snake_case , help='finetuned_model_name' )
parser.add_argument(
'--recopy_model' , default=snake_case , type=snake_case , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=1_00 , min_len=10_26 , trim=snake_case , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , )
# Load train data for secondary learner
lowercase__: Tuple = joblib.load('data/IGF_values.jbl' )
# Train secondary learner
lowercase__: List[str] = training_secondary_learner(
snake_case , secondary_learner_max_epochs=15 , secondary_learner_batch_size=1_28 , eval_freq=1_00 , igf_model_path='igf_model.pt' , )
# load pretrained gpt2 model
lowercase__: Dict = GPTaLMHeadModel.from_pretrained('gpt2' )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
lowercase__ , lowercase__: Tuple = generate_datasets(
context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=1_00 , min_len=10_26 , trim=snake_case )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
snake_case , snake_case , snake_case , context_len=32 , max_steps=10_00 , batch_size=16 , threshold=1.0 , recopy_model=snake_case , secondary_learner=snake_case , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , )
if __name__ == "__main__":
main()
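# Reading note: as written, main() builds an argument parser but never calls
# parse_args(), so the pipeline below the parser definitions runs entirely on the
# hard-coded values (context_len=32, the WikiText jbl dumps, and so on); the flags
# serve as documentation only.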
| 196
| 1
|
'''simple docstring'''
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ : List[str] = logging.get_logger(__name__)
lowerCAmelCase_ : int = {
'b0': efficientnet.EfficientNetBa,
'b1': efficientnet.EfficientNetBa,
'b2': efficientnet.EfficientNetBa,
'b3': efficientnet.EfficientNetBa,
'b4': efficientnet.EfficientNetBa,
'b5': efficientnet.EfficientNetBa,
'b6': efficientnet.EfficientNetBa,
'b7': efficientnet.EfficientNetBa,
}
lowerCAmelCase_ : Union[str, Any] = {
'b0': {
'hidden_dim': 12_80,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 2_24,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 12_80,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 2_40,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 14_08,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 2_60,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 15_36,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 3_00,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 17_92,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 3_80,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 20_48,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 4_56,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 23_04,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 5_28,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 25_60,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 6_00,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def _lowerCamelCase ( lowercase : int ) -> Any:
_a = EfficientNetConfig()
_a = CONFIG_MAP[model_name]["hidden_dim"]
_a = CONFIG_MAP[model_name]["width_coef"]
_a = CONFIG_MAP[model_name]["depth_coef"]
_a = CONFIG_MAP[model_name]["image_size"]
_a = CONFIG_MAP[model_name]["dropout_rate"]
_a = CONFIG_MAP[model_name]["dw_padding"]
_a = "huggingface/label-files"
_a = "imagenet-1k-id2label.json"
_a = 1000
_a = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) )
_a = {int(lowercase ): v for k, v in idalabel.items()}
_a = idalabel
_a = {v: k for k, v in idalabel.items()}
return config
def _lowerCamelCase ( ) -> Any:
_a = "http://images.cocodataset.org/val2017/000000039769.jpg"
_a = Image.open(requests.get(lowercase , stream=lowercase ).raw )
return im
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> str:
_a = CONFIG_MAP[model_name]["image_size"]
_a = EfficientNetImageProcessor(
size={"height": size, "width": size} , image_mean=[0.4_85, 0.4_56, 0.4_06] , image_std=[0.47_85_39_44, 0.4_73_28_64, 0.47_43_41_63] , do_center_crop=lowercase , )
return preprocessor
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> List[Any]:
_a = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
_a = sorted(set(lowercase ) )
_a = len(lowercase )
_a = {b: str(lowercase ) for b, i in zip(lowercase , range(lowercase ) )}
_a = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
_a = block_name_mapping[b]
rename_keys.append((F'block{b}_expand_conv/kernel:0', F'encoder.blocks.{hf_b}.expansion.expand_conv.weight') )
rename_keys.append((F'block{b}_expand_bn/gamma:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.weight') )
rename_keys.append((F'block{b}_expand_bn/beta:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.bias') )
rename_keys.append(
(F'block{b}_expand_bn/moving_mean:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_mean') )
rename_keys.append(
(F'block{b}_expand_bn/moving_variance:0', F'encoder.blocks.{hf_b}.expansion.expand_bn.running_var') )
rename_keys.append(
(F'block{b}_dwconv/depthwise_kernel:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight') )
rename_keys.append((F'block{b}_bn/gamma:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight') )
rename_keys.append((F'block{b}_bn/beta:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias') )
rename_keys.append(
(F'block{b}_bn/moving_mean:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean') )
rename_keys.append(
(F'block{b}_bn/moving_variance:0', F'encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var') )
rename_keys.append((F'block{b}_se_reduce/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.weight') )
rename_keys.append((F'block{b}_se_reduce/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.reduce.bias') )
rename_keys.append((F'block{b}_se_expand/kernel:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.weight') )
rename_keys.append((F'block{b}_se_expand/bias:0', F'encoder.blocks.{hf_b}.squeeze_excite.expand.bias') )
rename_keys.append(
(F'block{b}_project_conv/kernel:0', F'encoder.blocks.{hf_b}.projection.project_conv.weight') )
rename_keys.append((F'block{b}_project_bn/gamma:0', F'encoder.blocks.{hf_b}.projection.project_bn.weight') )
rename_keys.append((F'block{b}_project_bn/beta:0', F'encoder.blocks.{hf_b}.projection.project_bn.bias') )
rename_keys.append(
(F'block{b}_project_bn/moving_mean:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_mean') )
rename_keys.append(
(F'block{b}_project_bn/moving_variance:0', F'encoder.blocks.{hf_b}.projection.project_bn.running_var') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
_a = {}
for item in rename_keys:
if item[0] in original_param_names:
_a = "efficientnet." + item[1]
_a = "classifier.weight"
_a = "classifier.bias"
return key_mapping
def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : int , lowercase : str ) -> Optional[int]:
for key, value in tf_params.items():
if "normalization" in key:
continue
_a = key_mapping[key]
if "_conv" in key and "kernel" in key:
_a = torch.from_numpy(lowercase ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
_a = torch.from_numpy(lowercase ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
_a = torch.from_numpy(np.transpose(lowercase ) )
else:
_a = torch.from_numpy(lowercase )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(lowercase )
@torch.no_grad()
def _lowerCamelCase ( lowercase : Any , lowercase : List[str] , lowercase : int , lowercase : List[Any] ) -> Optional[Any]:
_a = model_classes[model_name](
include_top=lowercase , weights="imagenet" , input_tensor=lowercase , input_shape=lowercase , pooling=lowercase , classes=1000 , classifier_activation="softmax" , )
_a = original_model.trainable_variables
_a = original_model.non_trainable_variables
_a = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
_a = param.numpy()
_a = list(tf_params.keys() )
# Load HuggingFace model
_a = get_efficientnet_config(lowercase )
_a = EfficientNetForImageClassification(lowercase ).eval()
_a = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
_a = rename_keys(lowercase )
replace_params(lowercase , lowercase , lowercase )
# Initialize preprocessor and preprocess input image
_a = convert_image_processor(lowercase )
_a = preprocessor(images=prepare_img() , return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
_a = hf_model(**lowercase )
_a = outputs.logits.detach().numpy()
# Original model inference
_a = False
_a = CONFIG_MAP[model_name]["image_size"]
_a = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
_a = image.img_to_array(lowercase )
_a = np.expand_dims(lowercase , axis=0 )
_a = original_model.predict(lowercase )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(lowercase , lowercase , atol=1E-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(lowercase ):
os.mkdir(lowercase )
# Save converted model and image processor
hf_model.save_pretrained(lowercase )
preprocessor.save_pretrained(lowercase )
if push_to_hub:
# Push model and image processor to hub
print(F'Pushing converted {model_name} to the hub...' )
_a = F'efficientnet-{model_name}'
preprocessor.push_to_hub(lowercase )
hf_model.push_to_hub(lowercase )
if __name__ == "__main__":
lowerCAmelCase_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
lowerCAmelCase_ : List[Any] = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
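# Hypothetical invocation (the file name is assumed; the flags are the ones defined
# above):
#
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model
#
# With --push_to_hub, the converted model is uploaded under the repo name
# "efficientnet-<model_name>", per the push logic above.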
| 362
|
'''simple docstring'''
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Union[str, Any]:
_enforce_args(lowercase , lowercase )
if n == 0:
return 0
_a = float("-inf" )
for i in range(1 , n + 1 ):
_a = max(
lowercase , prices[i - 1] + naive_cut_rod_recursive(n - i , lowercase ) )
return max_revue
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Tuple:
_enforce_args(lowercase , lowercase )
_a = [float("-inf" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(lowercase , lowercase , lowercase )
def _lowerCamelCase ( lowercase : int , lowercase : list , lowercase : list ) -> List[str]:
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
_a = float("-inf" )
for i in range(1 , n + 1 ):
_a = max(
lowercase , prices[i - 1] + _top_down_cut_rod_recursive(n - i , lowercase , lowercase ) , )
_a = max_revenue
return max_rev[n]
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Any:
_enforce_args(lowercase , lowercase )
    # length(max_rev) = n + 1, to accommodate the revenue obtainable from a rod of
    # length 0.
_a = [float("-inf" ) for _ in range(n + 1 )]
_a = 0
for i in range(1 , n + 1 ):
_a = max_rev[i]
for j in range(1 , i + 1 ):
_a = max(lowercase , prices[j - 1] + max_rev[i - j] )
_a = max_revenue_i
return max_rev[n]
def _lowerCamelCase ( lowercase : int , lowercase : list ) -> Dict:
if n < 0:
_a = F'n must be greater than or equal to 0. Got n = {n}'
raise ValueError(lowercase )
if n > len(lowercase ):
_a = (
"Each integral piece of rod must have a corresponding price. "
F'Got n = {n} but length of prices = {len(lowercase )}'
)
raise ValueError(lowercase )
def _lowerCamelCase ( ) -> Any:
_a = [6, 10, 12, 15, 20, 23]
_a = len(lowercase )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
_a = 36
_a = top_down_cut_rod(lowercase , lowercase )
_a = bottom_up_cut_rod(lowercase , lowercase )
_a = naive_cut_rod_recursive(lowercase , lowercase )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main()
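# Illustrative trace of the bottom-up recurrence with the prices from main():
# prices = [6, 10, 12, 15, 20, 23] fills max_rev = [0, 6, 12, 18, 24, 30, 36];
# e.g. max_rev[2] = max(6 + max_rev[1], 10 + max_rev[0]) = 12, so six unit cuts
# give the expected optimum of 36.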
| 346
| 0
|
"""simple docstring"""
from collections import defaultdict
def lowercase ( __snake_case : str , __snake_case : str ):
lowercase_ : int = first_str.lower().strip()
lowercase_ : Any = second_str.lower().strip()
# Remove whitespace
lowercase_ : int = first_str.replace(''' ''' , '''''' )
lowercase_ : Optional[int] = second_str.replace(''' ''' , '''''' )
# Strings of different lengths are not anagrams
if len(__snake_case ) != len(__snake_case ):
return False
# Default values for count should be 0
lowercase_ : defaultdict[str, int] = defaultdict(__snake_case )
    # For each character position, increment the count for the first string's
    # character and decrement it for the second string's character
for i in range(len(__snake_case ) ):
count[first_str[i]] += 1
count[second_str[i]] -= 1
return all(_count == 0 for _count in count.values() )
if __name__ == "__main__":
from doctest import testmod
testmod()
__A : Optional[Any] = input('''Enter the first string ''').strip()
__A : Any = input('''Enter the second string ''').strip()
__A : Any = check_anagrams(input_a, input_b)
print(F"""{input_a} and {input_b} are {'' if status else 'not '}anagrams.""")
| 33
|
'''simple docstring'''
class __snake_case :
"""simple docstring"""
def __init__( self : int , lowerCamelCase : int , lowerCamelCase : int=None , lowerCamelCase : int=None ) -> str:
lowerCAmelCase_ : str = data
lowerCAmelCase_ : Optional[Any] = previous
lowerCAmelCase_ : int = next_node
def __str__( self : Any ) -> str:
return F'{self.data}'
def __lowercase ( self : Optional[Any] ) -> int:
return self.data
def __lowercase ( self : str ) -> List[str]:
return self.next
def __lowercase ( self : int ) -> Optional[int]:
return self.previous
class __snake_case :
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCamelCase : Optional[int] ) -> Optional[Any]:
lowerCAmelCase_ : Optional[Any] = head
def __iter__( self : str ) -> Optional[Any]:
return self
def __lowercase ( self : Union[str, Any] ) -> Dict:
if not self.current:
raise StopIteration
else:
lowerCAmelCase_ : Dict = self.current.get_data()
lowerCAmelCase_ : Tuple = self.current.get_next()
return value
class __snake_case :
"""simple docstring"""
def __init__( self : Union[str, Any] ) -> Any:
lowerCAmelCase_ : Optional[Any] = None # First node in list
lowerCAmelCase_ : Optional[Any] = None # Last node in list
def __str__( self : Optional[int] ) -> Dict:
lowerCAmelCase_ : str = self.head
lowerCAmelCase_ : Tuple = []
while current is not None:
nodes.append(current.get_data() )
lowerCAmelCase_ : str = current.get_next()
return " ".join(str(lowerCamelCase ) for node in nodes )
def __contains__( self : List[Any] , lowerCamelCase : int ) -> List[str]:
lowerCAmelCase_ : List[str] = self.head
while current:
if current.get_data() == value:
return True
lowerCAmelCase_ : List[Any] = current.get_next()
return False
def __iter__( self : str ) -> Optional[Any]:
return LinkedListIterator(self.head )
def __lowercase ( self : Dict ) -> Optional[int]:
if self.head:
return self.head.get_data()
return None
def __lowercase ( self : List[str] ) -> Optional[Any]:
if self.tail:
return self.tail.get_data()
return None
def __lowercase ( self : Optional[Any] , lowerCamelCase : Node ) -> None:
if self.head is None:
lowerCAmelCase_ : Union[str, Any] = node
lowerCAmelCase_ : List[str] = node
else:
self.insert_before_node(self.head , lowerCamelCase )
def __lowercase ( self : Tuple , lowerCamelCase : Node ) -> None:
if self.head is None:
self.set_head(lowerCamelCase )
else:
self.insert_after_node(self.tail , lowerCamelCase )
def __lowercase ( self : Union[str, Any] , lowerCamelCase : int ) -> None:
lowerCAmelCase_ : int = Node(lowerCamelCase )
if self.head is None:
self.set_head(lowerCamelCase )
else:
self.set_tail(lowerCamelCase )
def __lowercase ( self : Optional[Any] , lowerCamelCase : Node , lowerCamelCase : Node ) -> None:
lowerCAmelCase_ : Optional[int] = node
lowerCAmelCase_ : List[Any] = node.previous
if node.get_previous() is None:
lowerCAmelCase_ : Tuple = node_to_insert
else:
lowerCAmelCase_ : Dict = node_to_insert
lowerCAmelCase_ : Optional[int] = node_to_insert
def __lowercase ( self : Union[str, Any] , lowerCamelCase : Node , lowerCamelCase : Node ) -> None:
lowerCAmelCase_ : Optional[int] = node
lowerCAmelCase_ : Tuple = node.next
if node.get_next() is None:
lowerCAmelCase_ : Tuple = node_to_insert
else:
lowerCAmelCase_ : Tuple = node_to_insert
lowerCAmelCase_ : Optional[Any] = node_to_insert
def __lowercase ( self : Dict , lowerCamelCase : int , lowerCamelCase : int ) -> None:
lowerCAmelCase_ : List[str] = 1
lowerCAmelCase_ : Tuple = Node(lowerCamelCase )
lowerCAmelCase_ : List[Any] = self.head
while node:
if current_position == position:
self.insert_before_node(lowerCamelCase , lowerCamelCase )
return
current_position += 1
lowerCAmelCase_ : str = node.next
self.insert_after_node(self.tail , lowerCamelCase )
def __lowercase ( self : int , lowerCamelCase : int ) -> Node:
lowerCAmelCase_ : List[Any] = self.head
while node:
if node.get_data() == item:
return node
lowerCAmelCase_ : List[Any] = node.get_next()
raise Exception("""Node not found""" )
def __lowercase ( self : str , lowerCamelCase : str ) -> int:
if (node := self.get_node(lowerCamelCase )) is not None:
if node == self.head:
lowerCAmelCase_ : Any = self.head.get_next()
if node == self.tail:
lowerCAmelCase_ : Optional[int] = self.tail.get_previous()
self.remove_node_pointers(lowerCamelCase )
@staticmethod
def __lowercase ( lowerCamelCase : Node ) -> None:
if node.get_next():
lowerCAmelCase_ : Tuple = node.previous
if node.get_previous():
lowerCAmelCase_ : Any = node.next
lowerCAmelCase_ : List[Any] = None
lowerCAmelCase_ : Any = None
def __lowercase ( self : str ) -> Optional[Any]:
return self.head is None
def UpperCamelCase_ ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
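# Note: the identifier mangling above leaves this file non-runnable (the three
# classes share one name and the methods shadow each other). Against the un-mangled
# original, a minimal session would look roughly like this (names are placeholders):
#
#   dll = DoublyLinkedList()
#   dll.insert_at_position(1, 10)
#   10 in dll          # True, via __contains__
#   str(dll)           # "10", via __str__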
| 120
| 0
|
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase_ = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : List[str] = SpeechTaTokenizer
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Dict = True
def lowerCAmelCase__ ( self: Optional[int] ):
super().setUp()
# We have a SentencePiece fixture for testing
__lowerCamelCase = SpeechTaTokenizer(UpperCamelCase_ )
__lowerCamelCase = AddedToken("""<mask>""" , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ )
__lowerCamelCase = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token} )
tokenizer.add_tokens(["""<ctc_blank>"""] )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: str ):
__lowerCamelCase = """this is a test"""
__lowerCamelCase = """this is a test"""
return input_text, output_text
def lowerCAmelCase__ ( self: Optional[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: List[str]=False , UpperCamelCase_: Any=20 , UpperCamelCase_: Tuple=5 ):
__lowerCamelCase, __lowerCamelCase = self.get_input_output_texts(UpperCamelCase_ )
__lowerCamelCase = tokenizer.encode(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ )
__lowerCamelCase = tokenizer.decode(UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ )
return text, ids
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = """<pad>"""
__lowerCamelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase_ ) , UpperCamelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase_ ) , UpperCamelCase_ )
def lowerCAmelCase__ ( self: int ):
__lowerCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-4] , """œ""" )
self.assertEqual(vocab_keys[-2] , """<mask>""" )
self.assertEqual(vocab_keys[-1] , """<ctc_blank>""" )
self.assertEqual(len(UpperCamelCase_ ) , 81 )
def lowerCAmelCase__ ( self: str ):
self.assertEqual(self.get_tokenizer().vocab_size , 79 )
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.get_tokenizers(do_lower_case=UpperCamelCase_ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__lowerCamelCase = tokenizer.vocab_size
__lowerCamelCase = len(UpperCamelCase_ )
self.assertNotEqual(UpperCamelCase_ , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__lowerCamelCase = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
__lowerCamelCase = tokenizer.add_tokens(UpperCamelCase_ )
__lowerCamelCase = tokenizer.vocab_size
__lowerCamelCase = len(UpperCamelCase_ )
self.assertNotEqual(UpperCamelCase_ , 0 )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , len(UpperCamelCase_ ) )
self.assertEqual(UpperCamelCase_ , all_size + len(UpperCamelCase_ ) )
__lowerCamelCase = tokenizer.encode("""aaaaa bbbbbb low cccccccccdddddddd l""" , add_special_tokens=UpperCamelCase_ )
self.assertGreaterEqual(len(UpperCamelCase_ ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
__lowerCamelCase = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
__lowerCamelCase = tokenizer.add_special_tokens(UpperCamelCase_ )
__lowerCamelCase = tokenizer.vocab_size
__lowerCamelCase = len(UpperCamelCase_ )
self.assertNotEqual(UpperCamelCase_ , 0 )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , len(UpperCamelCase_ ) )
self.assertEqual(UpperCamelCase_ , all_size_a + len(UpperCamelCase_ ) )
__lowerCamelCase = tokenizer.encode(
""">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l""" , add_special_tokens=UpperCamelCase_ )
self.assertGreaterEqual(len(UpperCamelCase_ ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCAmelCase__ ( self: Union[str, Any] ):
pass
def lowerCAmelCase__ ( self: str ):
pass
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = tokenizer.tokenize("""This is a test""" )
# fmt: off
self.assertListEqual(UpperCamelCase_ , [SPIECE_UNDERLINE, """T""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """a""", SPIECE_UNDERLINE, """t""", """e""", """s""", """t"""] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
__lowerCamelCase = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
UpperCamelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """92000""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
__lowerCamelCase = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
# fmt: off
self.assertListEqual(UpperCamelCase_ , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
__lowerCamelCase = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [SPIECE_UNDERLINE, """I""", SPIECE_UNDERLINE, """w""", """a""", """s""", SPIECE_UNDERLINE, """b""", """o""", """r""", """n""", SPIECE_UNDERLINE, """i""", """n""", SPIECE_UNDERLINE, """<unk>""", """,""", SPIECE_UNDERLINE, """a""", """n""", """d""", SPIECE_UNDERLINE, """t""", """h""", """i""", """s""", SPIECE_UNDERLINE, """i""", """s""", SPIECE_UNDERLINE, """f""", """a""", """l""", """s""", """é""", """."""] )
@slow
def lowerCAmelCase__ ( self: str ):
# Use custom sequence because this tokenizer does not handle numbers.
__lowerCamelCase = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
__lowerCamelCase = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase_ , model_name="""microsoft/speecht5_asr""" , revision="""c5ef64c71905caeccde0e4462ef3f9077224c524""" , sequences=UpperCamelCase_ , )
| 29
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Union[str, Any] = 'yolos'
def __init__( self: Dict , UpperCamelCase_: List[Any]=7_68 , UpperCamelCase_: Tuple=12 , UpperCamelCase_: int=12 , UpperCamelCase_: int=30_72 , UpperCamelCase_: List[str]="gelu" , UpperCamelCase_: Union[str, Any]=0.0 , UpperCamelCase_: int=0.0 , UpperCamelCase_: Optional[int]=0.02 , UpperCamelCase_: Dict=1E-12 , UpperCamelCase_: List[Any]=[5_12, 8_64] , UpperCamelCase_: Optional[int]=16 , UpperCamelCase_: Any=3 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: List[str]=1_00 , UpperCamelCase_: List[str]=True , UpperCamelCase_: Any=False , UpperCamelCase_: Optional[Any]=1 , UpperCamelCase_: Any=5 , UpperCamelCase_: Any=2 , UpperCamelCase_: Tuple=5 , UpperCamelCase_: str=2 , UpperCamelCase_: Any=0.1 , **UpperCamelCase_: Any , ):
super().__init__(**UpperCamelCase_ )
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = image_size
__lowerCamelCase = patch_size
__lowerCamelCase = num_channels
__lowerCamelCase = qkv_bias
__lowerCamelCase = num_detection_tokens
__lowerCamelCase = use_mid_position_embeddings
__lowerCamelCase = auxiliary_loss
# Hungarian matcher
__lowerCamelCase = class_cost
__lowerCamelCase = bbox_cost
__lowerCamelCase = giou_cost
# Loss coefficients
__lowerCamelCase = bbox_loss_coefficient
__lowerCamelCase = giou_loss_coefficient
__lowerCamelCase = eos_coefficient
class lowerCamelCase__( __lowerCamelCase):
UpperCAmelCase__ : Tuple = version.parse('1.11')
@property
def lowerCAmelCase__ ( self: Any ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCAmelCase__ ( self: Dict ):
return 1E-4
@property
def lowerCAmelCase__ ( self: Dict ):
return 12
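# Illustrative sketch (not part of the original file): an OnnxConfig like the
# one above declares the exporter-facing contract -- input names with dynamic
# axes, the validation tolerance (1e-4) and the minimum opset (12). The usage
# below is an assumption for demonstration; the class name `YolosOnnxConfig`
# and the exact export() signature may differ across transformers versions.
#
#     from pathlib import Path
#     from transformers import AutoImageProcessor, AutoModel
#     from transformers.onnx import export
#
#     model = AutoModel.from_pretrained("hustvl/yolos-small")
#     processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
#     onnx_config = YolosOnnxConfig(model.config)
#     export(processor, model, onnx_config, onnx_config.default_onnx_opset, Path("yolos.onnx"))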
| 29
| 1
|
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _A ( _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : int = AudioLDMPipeline
_UpperCamelCase : Tuple = TEXT_TO_AUDIO_PARAMS
_UpperCamelCase : int = TEXT_TO_AUDIO_BATCH_PARAMS
_UpperCamelCase : int = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def __a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase : List[str] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=_A , )
lowercase : Tuple = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , )
torch.manual_seed(0 )
lowercase : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase : str = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , projection_dim=32 , )
lowercase : Optional[Any] = ClapTextModelWithProjection(_A )
lowercase : Any = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77 )
lowercase : Dict = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16_000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=_A , )
lowercase : Any = SpeechTaHifiGan(_A )
lowercase : List[str] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''vocoder''': vocoder,
}
return components
def __a ( self : Union[str, Any] , _A : str , _A : Any=0 ) -> Union[str, Any]:
"""simple docstring"""
if str(_A ).startswith('''mps''' ):
lowercase : Union[str, Any] = torch.manual_seed(_A )
else:
lowercase : Dict = torch.Generator(device=_A ).manual_seed(_A )
lowercase : int = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
}
return inputs
def __a ( self : Union[str, Any] ) -> int:
"""simple docstring"""
lowercase : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Optional[Any] = self.get_dummy_components()
lowercase : int = AudioLDMPipeline(**_A )
lowercase : int = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
lowercase : Dict = self.get_dummy_inputs(_A )
lowercase : Dict = audioldm_pipe(**_A )
lowercase : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(_A ) == 256
lowercase : int = audio[:10]
lowercase : Any = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def __a ( self : List[str] ) -> str:
"""simple docstring"""
lowercase : Optional[int] = self.get_dummy_components()
lowercase : Union[str, Any] = AudioLDMPipeline(**_A )
lowercase : Any = audioldm_pipe.to(_A )
lowercase : Union[str, Any] = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
lowercase : Optional[Any] = self.get_dummy_inputs(_A )
lowercase : Tuple = 3 * [inputs['''prompt''']]
# forward
lowercase : Any = audioldm_pipe(**_A )
lowercase : Optional[Any] = output.audios[0]
lowercase : Any = self.get_dummy_inputs(_A )
lowercase : Any = 3 * [inputs.pop('''prompt''' )]
lowercase : Optional[Any] = audioldm_pipe.tokenizer(
_A , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_A , return_tensors='''pt''' , )
lowercase : Any = text_inputs['''input_ids'''].to(_A )
lowercase : Union[str, Any] = audioldm_pipe.text_encoder(
_A , )
lowercase : Optional[int] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowercase : str = F.normalize(_A , dim=-1 )
lowercase : List[str] = prompt_embeds
# forward
lowercase : Dict = audioldm_pipe(**_A )
lowercase : Tuple = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def __a ( self : Optional[Any] ) -> Any:
"""simple docstring"""
lowercase : Tuple = self.get_dummy_components()
lowercase : Optional[Any] = AudioLDMPipeline(**_A )
lowercase : Tuple = audioldm_pipe.to(_A )
lowercase : int = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
lowercase : List[str] = self.get_dummy_inputs(_A )
lowercase : str = 3 * ['''this is a negative prompt''']
lowercase : Any = negative_prompt
lowercase : Any = 3 * [inputs['''prompt''']]
# forward
lowercase : List[str] = audioldm_pipe(**_A )
lowercase : List[str] = output.audios[0]
lowercase : Optional[Any] = self.get_dummy_inputs(_A )
lowercase : Union[str, Any] = 3 * [inputs.pop('''prompt''' )]
lowercase : Dict = []
for p in [prompt, negative_prompt]:
lowercase : List[str] = audioldm_pipe.tokenizer(
_A , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=_A , return_tensors='''pt''' , )
lowercase : Union[str, Any] = text_inputs['''input_ids'''].to(_A )
lowercase : int = audioldm_pipe.text_encoder(
_A , )
lowercase : Tuple = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowercase : int = F.normalize(_A , dim=-1 )
embeds.append(_A )
lowercase , lowercase : Any = embeds
# forward
lowercase : List[Any] = audioldm_pipe(**_A )
lowercase : Union[str, Any] = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def __a ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
lowercase : int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : str = self.get_dummy_components()
lowercase : Union[str, Any] = PNDMScheduler(skip_prk_steps=_A )
lowercase : Dict = AudioLDMPipeline(**_A )
lowercase : Union[str, Any] = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
lowercase : str = self.get_dummy_inputs(_A )
lowercase : Optional[Any] = '''egg cracking'''
lowercase : Union[str, Any] = audioldm_pipe(**_A , negative_prompt=_A )
lowercase : int = output.audios[0]
assert audio.ndim == 1
assert len(_A ) == 256
lowercase : List[Any] = audio[:10]
lowercase : Dict = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def __a ( self : Optional[Any] ) -> str:
"""simple docstring"""
lowercase : Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : Optional[Any] = self.get_dummy_components()
lowercase : int = PNDMScheduler(skip_prk_steps=_A )
lowercase : int = AudioLDMPipeline(**_A )
lowercase : Any = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
lowercase : List[str] = '''A hammer hitting a wooden surface'''
# test num_waveforms_per_prompt=1 (default)
lowercase : Optional[Any] = audioldm_pipe(_A , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
lowercase : Any = 2
lowercase : Any = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
lowercase : Dict = 2
lowercase : Union[str, Any] = audioldm_pipe(_A , num_inference_steps=2 , num_waveforms_per_prompt=_A ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
lowercase : Optional[Any] = 2
lowercase : List[Any] = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=_A ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def __a ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase : str = self.get_dummy_components()
lowercase : Optional[Any] = AudioLDMPipeline(**_A )
lowercase : List[Any] = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
lowercase : Dict = audioldm_pipe.vocoder.config.sampling_rate
lowercase : Optional[int] = self.get_dummy_inputs(_A )
lowercase : Optional[int] = audioldm_pipe(audio_length_in_s=0.016 , **_A )
lowercase : Optional[Any] = output.audios[0]
assert audio.ndim == 1
assert len(_A ) / vocoder_sampling_rate == 0.016
lowercase : Dict = audioldm_pipe(audio_length_in_s=0.032 , **_A )
lowercase : List[str] = output.audios[0]
assert audio.ndim == 1
assert len(_A ) / vocoder_sampling_rate == 0.032
def __a ( self : Optional[int] ) -> str:
"""simple docstring"""
lowercase : str = self.get_dummy_components()
lowercase : Optional[Any] = AudioLDMPipeline(**_A )
lowercase : Tuple = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
lowercase : List[str] = ['''hey''']
lowercase : Dict = audioldm_pipe(_A , num_inference_steps=1 )
lowercase : Optional[Any] = output.audios.shape
assert audio_shape == (1, 256)
lowercase : Union[str, Any] = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
lowercase : Optional[int] = SpeechTaHifiGan(_A ).to(_A )
lowercase : Dict = audioldm_pipe(_A , num_inference_steps=1 )
lowercase : List[str] = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def __a ( self : Tuple ) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_A )
def __a ( self : int ) -> Optional[int]:
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=_A )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __a ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_A )
@slow
class _A ( unittest.TestCase ):
def __a ( self : str ) -> str:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : Optional[int] , _A : List[Any] , _A : str="cpu" , _A : Dict=torch.floataa , _A : str=0 ) -> int:
"""simple docstring"""
lowercase : Union[str, Any] = torch.Generator(device=_A ).manual_seed(_A )
lowercase : List[str] = np.random.RandomState(_A ).standard_normal((1, 8, 128, 16) )
lowercase : List[Any] = torch.from_numpy(_A ).to(device=_A , dtype=_A )
lowercase : Optional[Any] = {
'''prompt''': '''A hammer hitting a wooden surface''',
'''latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 2.5,
}
return inputs
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
lowercase : Tuple = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
lowercase : Dict = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
lowercase : Optional[int] = self.get_inputs(_A )
lowercase : Any = 25
lowercase : str = audioldm_pipe(**_A ).audios[0]
assert audio.ndim == 1
assert len(_A ) == 81_920
lowercase : Dict = audio[77_230:77_240]
lowercase : Any = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
lowercase : Any = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def __a ( self : List[Any] ) -> str:
"""simple docstring"""
lowercase : Tuple = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' )
lowercase : str = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
lowercase : List[Any] = audioldm_pipe.to(_A )
audioldm_pipe.set_progress_bar_config(disable=_A )
lowercase : Optional[int] = self.get_inputs(_A )
lowercase : Tuple = audioldm_pipe(**_A ).audios[0]
assert audio.ndim == 1
assert len(_A ) == 81_920
lowercase : Dict = audio[27_780:27_790]
lowercase : int = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
lowercase : Dict = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 308
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'vocab.txt'}
lowerCAmelCase_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
lowerCAmelCase_ = {
'openbmb/cpm-ant-10b': 10_24,
}
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : Optional[int] = collections.OrderedDict()
with open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as reader:
lowercase : str = reader.readlines()
for index, token in enumerate(__magic_name__ ):
lowercase : Union[str, Any] = token.rstrip('''\n''' )
lowercase : List[Any] = index
return vocab
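# Illustrative note (not part of the original file): the loader above (its
# obfuscated name in this snippet is `snake_case`) expects a plain-text
# vocabulary with one token per line, the 0-based line index becoming the
# token id. A file containing
#     <unk>
#     hello
#     world
# therefore loads as OrderedDict([("<unk>", 0), ("hello", 1), ("world", 2)]).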
class _A ( _lowerCamelCase ):
def __init__( self : List[str] , _A : Any , _A : List[str]="<unk>" , _A : Union[str, Any]=200 ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[int] = vocab
lowercase : List[str] = unk_token
lowercase : Any = max_input_chars_per_word
def __a ( self : List[str] , _A : Tuple ) -> str:
"""simple docstring"""
lowercase : Dict = list(_A )
if len(_A ) > self.max_input_chars_per_word:
return [self.unk_token]
lowercase : int = 0
lowercase : Dict = []
while start < len(_A ):
lowercase : Optional[Any] = len(_A )
lowercase : List[str] = None
while start < end:
lowercase : List[Any] = ''''''.join(chars[start:end] )
if substr in self.vocab:
lowercase : Union[str, Any] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(_A )
lowercase : Dict = end
return sub_tokens
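# Self-contained sketch (not part of the original file): the method above is a
# greedy longest-match-first WordPiece pass over raw characters (no "##"
# continuation prefixes). The stand-alone helper below restates the same loop
# with explicit names so its behavior can be checked directly:
def _wordpiece_example(text, vocab, unk="<unk>"):
    tokens, start = [], 0
    while start < len(text):
        end, cur = len(text), None
        while start < end:  # shrink the window until a vocab entry matches
            if text[start:end] in vocab:
                cur = text[start:end]
                break
            end -= 1
        if cur is None:  # no prefix matched: emit the unknown token, advance one char
            tokens.append(unk)
            start += 1
        else:
            tokens.append(cur)
            start = end
    return tokens


assert _wordpiece_example("foobar", {"foo", "bar", "foob"}) == ["foob", "<unk>", "<unk>"]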
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
_UpperCamelCase : int = False
def __init__( self : List[str] , _A : int , _A : Optional[Any]="<d>" , _A : Any="</d>" , _A : Optional[Any]="<s>" , _A : Any="</s>" , _A : Any="<pad>" , _A : List[Any]="<unk>" , _A : Optional[Any]="</n>" , _A : List[str]="</_>" , _A : Optional[Any]="left" , **_A : str , ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=_A , eod_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , unk_token=_A , line_token=_A , space_token=_A , padding_side=_A , **_A , )
lowercase : str = bod_token
lowercase : str = eod_token
lowercase : Any = load_vocab(_A )
lowercase : List[Any] = self.encoder[space_token]
lowercase : Tuple = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
lowercase : Any = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
lowercase : int = {v: k for k, v in self.encoder.items()}
lowercase : Optional[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def __a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def __a ( self : List[str] ) -> List[str]:
"""simple docstring"""
return self.encoder["\n"]
@property
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
return len(self.encoder )
def __a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self : str , _A : List[str] ) -> Tuple:
"""simple docstring"""
lowercase : int = []
for x in jieba.cut(_A , cut_all=_A ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(_A ) )
return output_tokens
def __a ( self : List[Any] , _A : Tuple , **_A : Optional[int] ) -> Any:
"""simple docstring"""
lowercase : List[str] = [i for i in token_ids if i >= 0]
lowercase : Any = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_A , **_A )
def __a ( self : List[Any] , _A : int ) -> Optional[Any]:
"""simple docstring"""
return token in self.encoder
def __a ( self : Dict , _A : List[str] ) -> str:
"""simple docstring"""
return "".join(_A )
def __a ( self : List[str] , _A : List[str] ) -> Any:
"""simple docstring"""
return self.encoder.get(_A , self.encoder.get(self.unk_token ) )
def __a ( self : Tuple , _A : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.decoder.get(_A , self.unk_token )
def __a ( self : List[Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if os.path.isdir(_A ):
lowercase : str = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
lowercase : Optional[int] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
lowercase : Any = 0
if " " in self.encoder:
lowercase : List[Any] = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
lowercase : Dict = self.encoder['''\n''']
del self.encoder["\n"]
lowercase : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
with open(_A , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
lowercase : Any = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def __a ( self : str , _A : List[int] , _A : List[int] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __a ( self : int , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is not None:
return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A ))
return [1] + ([0] * len(_A ))
| 308
| 1
|
def is_balanced(s):
    stack = []
    open_brackets = set({'(', '[', '{'} )
    closed_brackets = set({')', ']', '}'} )
    open_to_closed = {'{': '}', '[': ']', '(': ')'}
    for i in range(len(s ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(stack ) == 0 or (len(stack ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack ) == 0
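# Quick sanity checks (not part of the original file) for the matcher above:
assert is_balanced("([]{})") is True
assert is_balanced("([)]") is False
assert is_balanced("(((") is False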
def main():
    s = input('Enter sequence of brackets: ' )
    if is_balanced(s ):
        print(s , 'is balanced' )
    else:
        print(s , 'is not balanced' )


if __name__ == "__main__":
    main()
| 361
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def a__ ( A__ ):
if is_torch_version('<', '2.0.0' ) or not hasattr(torch, '_dynamo' ):
return False
return isinstance(A__, torch._dynamo.eval_frame.OptimizedModule )
def a__ ( A__, A__ = True ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
SCREAMING_SNAKE_CASE_ : List[str] = is_compiled_module(A__ )
if is_compiled:
SCREAMING_SNAKE_CASE_ : List[Any] = model
SCREAMING_SNAKE_CASE_ : Dict = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(A__, A__ ):
SCREAMING_SNAKE_CASE_ : int = model.module
if not keep_fpaa_wrapper:
SCREAMING_SNAKE_CASE_ : str = getattr(A__, 'forward' )
SCREAMING_SNAKE_CASE_ : Any = model.__dict__.pop('_original_forward', A__ )
if original_forward is not None:
while hasattr(A__, '__wrapped__' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = forward.__wrapped__
if forward == original_forward:
break
SCREAMING_SNAKE_CASE_ : Any = forward
if getattr(A__, '_converted_to_transformer_engine', A__ ):
convert_model(A__, to_transformer_engine=A__ )
if is_compiled:
SCREAMING_SNAKE_CASE_ : List[str] = model
SCREAMING_SNAKE_CASE_ : Dict = compiled_model
return model
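# Illustrative note (not part of the original file): the function above is an
# unwrapper in the spirit of accelerate's extract_model_from_parallel. It peels
# DistributedDataParallel / DataParallel (and DeepSpeedEngine, when available)
# wrappers via `.module`, optionally restores a stashed original forward when
# the fp16 wrapper is dropped, and for torch.compile()'d models returns the
# underlying `_orig_mod`. Sketched usage (the clear name is assumed, not taken
# from this file):
#
#     wrapped = torch.nn.DataParallel(torch.nn.Linear(2, 2))
#     inner = extract_model_from_parallel(wrapped)
#     assert isinstance(inner, torch.nn.Linear)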
def a__ ( ):
PartialState().wait_for_everyone()
def a__ ( A__, A__ ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(A__, A__ )
elif PartialState().local_process_index == 0:
torch.save(A__, A__ )
@contextmanager
def a__ ( **A__ ):
for key, value in kwargs.items():
SCREAMING_SNAKE_CASE_ : List[Any] = str(A__ )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def a__ ( A__ ):
if not hasattr(A__, '__qualname__' ) and not hasattr(A__, '__name__' ):
SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(A__, '__class__', A__ )
if hasattr(A__, '__qualname__' ):
return obj.__qualname__
if hasattr(A__, '__name__' ):
return obj.__name__
return str(A__ )
def a__ ( A__, A__ ):
for key, value in source.items():
if isinstance(A__, A__ ):
SCREAMING_SNAKE_CASE_ : Dict = destination.setdefault(A__, {} )
merge_dicts(A__, A__ )
else:
SCREAMING_SNAKE_CASE_ : Tuple = value
return destination
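# Self-contained sketch (not part of the original file): the merger above
# overlays `source` onto `destination` recursively, so nested dicts are merged
# key-by-key instead of being replaced wholesale. Restated with explicit names:
def _merge_dicts_demo(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})  # descend, creating the branch if needed
            _merge_dicts_demo(value, node)
        else:
            destination[key] = value
    return destination


assert _merge_dicts_demo({"a": {"x": 1}}, {"a": {"y": 2}, "b": 3}) == {"a": {"y": 2, "x": 1}, "b": 3}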
def a__ ( A__ = None ):
if port is None:
SCREAMING_SNAKE_CASE_ : Tuple = 2_9_5_0_0
with socket.socket(socket.AF_INET, socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
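# Illustrative usage (not part of the original file): the checker above returns
# True when something is already listening on the port (connect_ex() == 0),
# defaulting to 29500, the conventional torch.distributed rendezvous port:
#
#     if a__():  # `a__` is the obfuscated name of the port checker in this snippet
#         print("Port 29500 is in use; pick a different main_process_port.")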
| 162
| 0
|
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowerCamelCase__ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class SCREAMING_SNAKE_CASE :
__lowerCamelCase : Any =field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
__lowerCamelCase : Optional[int] =field(
default=UpperCAmelCase_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
__lowerCamelCase : Union[str, Any] =field(
default=UpperCAmelCase_ , metadata={'help': 'The column name of the images in the files.'} )
__lowerCamelCase : Tuple =field(default=UpperCAmelCase_ , metadata={'help': 'A folder containing the training data.'} )
__lowerCamelCase : int =field(default=UpperCAmelCase_ , metadata={'help': 'A folder containing the validation data.'} )
__lowerCamelCase : Optional[Any] =field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
__lowerCamelCase : str =field(
default=UpperCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__lowerCamelCase : Optional[int] =field(
default=UpperCAmelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = {}
if self.train_dir is not None:
__a = self.train_dir
if self.validation_dir is not None:
__a = self.validation_dir
__a = data_files if data_files else None
@dataclass
class SCREAMING_SNAKE_CASE :
__lowerCamelCase : Union[str, Any] =field(
default=UpperCAmelCase_ , metadata={
'help': (
'The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'
)
} , )
__lowerCamelCase : Union[str, Any] =field(
default=UpperCAmelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
__lowerCamelCase : Dict =field(
default=UpperCAmelCase_ , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
__lowerCamelCase : Dict =field(
default=UpperCAmelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
__lowerCamelCase : Tuple =field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__lowerCamelCase : Tuple =field(default=UpperCAmelCase_ , metadata={'help': 'Name or path of preprocessor config.'} )
__lowerCamelCase : List[Any] =field(
default=UpperCAmelCase_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__lowerCamelCase : Optional[Any] =field(
default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
__lowerCamelCase : Union[str, Any] =field(
default=UpperCAmelCase_ , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )
@dataclass
class SCREAMING_SNAKE_CASE ( UpperCAmelCase_ ):
__lowerCamelCase : Tuple =field(
default=1e-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
__a = torch.stack([example["""pixel_values"""] for example in examples] )
return {"pixel_values": pixel_values}
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__a , __a , __a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__a , __a , __a = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_mae""" , __A , __A )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__a = training_args.get_process_log_level()
logger.setLevel(__A )
transformers.utils.logging.set_verbosity(__A )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
__a = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__a = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Initialize our dataset.
__a = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
__a = None if """validation""" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __A ) and data_args.train_val_split > 0.0:
__a = ds["""train"""].train_test_split(data_args.train_val_split )
__a = split["""train"""]
__a = split["""test"""]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__a = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
__a = ViTMAEConfig.from_pretrained(model_args.config_name , **__A )
elif model_args.model_name_or_path:
__a = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__A )
else:
__a = ViTMAEConfig()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f"Overriding config: {model_args.config_overrides}" )
config.update_from_string(model_args.config_overrides )
logger.info(f"New config: {config}" )
# adapt config
config.update(
{
"""mask_ratio""": model_args.mask_ratio,
"""norm_pix_loss""": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
__a = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__A )
elif model_args.model_name_or_path:
__a = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__A )
else:
__a = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
__a = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
__a = ViTMAEForPreTraining(__A )
if training_args.do_train:
__a = ds["""train"""].column_names
else:
__a = ds["""validation"""].column_names
if data_args.image_column_name is not None:
__a = data_args.image_column_name
elif "image" in column_names:
__a = """image"""
elif "img" in column_names:
__a = """img"""
else:
__a = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
__a = image_processor.size["""shortest_edge"""]
else:
__a = (image_processor.size["""height"""], image_processor.size["""width"""])
__a = Compose(
[
Lambda(lambda img : img.convert("""RGB""" ) if img.mode != "RGB" else img ),
RandomResizedCrop(__A , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(_SCREAMING_SNAKE_CASE : Optional[int] ):
__a = [transforms(__A ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
__a = ds["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(__A )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
__a = (
ds["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(__A )
# Compute absolute learning rate
__a = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
__a = training_args.base_learning_rate * total_train_batch_size / 256
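    # Worked example (added for clarity, not in the original script): with a
    # base learning rate of 1.5e-4, per-device batch size 64, gradient
    # accumulation 2 and world size 8, total_train_batch_size = 64 * 2 * 8 =
    # 1024 and the absolute rate becomes 1.5e-4 * 1024 / 256 = 6e-4.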
# Initialize our trainer
__a = Trainer(
model=__A , args=__A , train_dataset=ds["""train"""] if training_args.do_train else None , eval_dataset=ds["""validation"""] if training_args.do_eval else None , tokenizer=__A , data_collator=__A , )
# Training
if training_args.do_train:
__a = None
if training_args.resume_from_checkpoint is not None:
__a = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__a = last_checkpoint
__a = trainer.train(resume_from_checkpoint=__A )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__a = trainer.evaluate()
trainer.log_metrics("""eval""" , __A )
trainer.save_metrics("""eval""" , __A )
# Write model card and (optionally) push to hub
__a = {
"""tasks""": """masked-auto-encoding""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""masked-auto-encoding"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__A )
else:
trainer.create_model_card(**__A )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 302
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
a__ : Union[str, Any] = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = '''albert'''
def __init__( self , lowercase=3_0_0_0_0 , lowercase=1_2_8 , lowercase=4_0_9_6 , lowercase=1_2 , lowercase=1 , lowercase=6_4 , lowercase=1_6_3_8_4 , lowercase=1 , lowercase="gelu_new" , lowercase=0 , lowercase=0 , lowercase=5_1_2 , lowercase=2 , lowercase=0.02 , lowercase=1E-12 , lowercase=0.1 , lowercase="absolute" , lowercase=0 , lowercase=2 , lowercase=3 , **lowercase , ) -> Any:
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
__UpperCamelCase = vocab_size
__UpperCamelCase = embedding_size
__UpperCamelCase = hidden_size
__UpperCamelCase = num_hidden_layers
__UpperCamelCase = num_hidden_groups
__UpperCamelCase = num_attention_heads
__UpperCamelCase = inner_group_num
__UpperCamelCase = hidden_act
__UpperCamelCase = intermediate_size
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = max_position_embeddings
__UpperCamelCase = type_vocab_size
__UpperCamelCase = initializer_range
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = classifier_dropout_prob
__UpperCamelCase = position_embedding_type
class UpperCAmelCase__ ( UpperCAmelCase_):
@property
def __lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__UpperCamelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__UpperCamelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
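# Illustrative note (not part of the original file): the property above makes
# the exported axes task-dependent. For task="multiple-choice" each input gets
# {0: "batch", 1: "choice", 2: "sequence"}; for every other task it is
# {0: "batch", 1: "sequence"}, so batch size, choice count and sequence length
# remain dynamic in the exported ONNX graph.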
| 349
| 0
|
'''simple docstring'''
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _lowercase ( _lowercase , _lowercase , unittest.TestCase ):
a = VQModel
a = """sample"""
@property
def lowerCamelCase_ ( self: List[str] , UpperCamelCase__: Tuple=(32, 32) ):
lowerCamelCase__ : List[Any] = 4
lowerCamelCase__ : Dict = 3
lowerCamelCase__ : Tuple = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
return {"sample": image}
@property
def lowerCamelCase_ ( self: str ):
return (3, 32, 32)
@property
def lowerCamelCase_ ( self: Optional[int] ):
return (3, 32, 32)
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : Optional[Any] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 3,
}
lowerCamelCase__ : Tuple = self.dummy_input
return init_dict, inputs_dict
def lowerCamelCase_ ( self: Any ):
pass
def lowerCamelCase_ ( self: Any ):
pass
def lowerCamelCase_ ( self: Tuple ):
lowerCamelCase__ : str = VQModel.from_pretrained("""fusing/vqgan-dummy""" , output_loading_info=UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def lowerCamelCase_ ( self: Union[str, Any] ):
lowerCamelCase__ : List[str] = VQModel.from_pretrained("""fusing/vqgan-dummy""" )
model.to(UpperCamelCase__ ).eval()
torch.manual_seed(0 )
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0 )
lowerCamelCase__ : Optional[int] = torch.randn(1 , model.config.in_channels , model.config.sample_size , model.config.sample_size )
lowerCamelCase__ : Tuple = image.to(UpperCamelCase__ )
with torch.no_grad():
lowerCamelCase__ : List[Any] = model(UpperCamelCase__ ).sample
lowerCamelCase__ : Union[str, Any] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
lowerCamelCase__ : Optional[Any] = torch.tensor([-0.0_153, -0.4_044, -0.1_880, -0.5_161, -0.2_418, -0.4_072, -0.1_612, -0.0_633, -0.0_143] )
# fmt: on
self.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1e-3 ) )
| 353
|
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_A : str =WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Tuple:
lowerCamelCase__ : int = test_results.split(""" """ )
lowerCamelCase__ : Optional[int] = 0
lowerCamelCase__ : Any = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
lowerCamelCase__ : Union[str, Any] = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
for i, expression in enumerate(UpperCamelCase ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
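# Illustrative example (not part of the original file) of the pytest summary
# string this parser expects, e.g. "2 failed, 30 passed in 12.34s": each
# "failed"/"passed" token adds the integer immediately before it, and the
# trailing token (or the one before a closing "==") is kept as the time spent.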
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Tuple:
lowerCamelCase__ : Optional[Any] = {}
lowerCamelCase__ : int = None
lowerCamelCase__ : Optional[int] = False
for line in failures_short_lines.split("""\n""" ):
if re.search(r"""_ \[doctest\]""" , UpperCamelCase ):
lowerCamelCase__ : Dict = True
lowerCamelCase__ : Optional[int] = line.split(""" """ )[2]
elif in_error and not line.split(""" """ )[0].isdigit():
lowerCamelCase__ : List[str] = line
lowerCamelCase__ : int = False
return failures
class _lowercase :
def __init__( self: Tuple , UpperCamelCase__: str , UpperCamelCase__: Dict ):
lowerCamelCase__ : Union[str, Any] = title
lowerCamelCase__ : Tuple = doc_test_results["""time_spent"""].split(""",""" )[0]
lowerCamelCase__ : Union[str, Any] = doc_test_results["""success"""]
lowerCamelCase__ : List[Any] = doc_test_results["""failures"""]
lowerCamelCase__ : List[str] = self.n_success + self.n_failures
# Failures and success of the modeling tests
lowerCamelCase__ : str = doc_test_results
@property
def lowerCamelCase_ ( self: Dict ):
lowerCamelCase__ : Union[str, Any] = [self._time_spent]
lowerCamelCase__ : Tuple = 0
for time in time_spent:
lowerCamelCase__ : Tuple = time.split(""":""" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(UpperCamelCase__ ) == 1:
lowerCamelCase__ : Tuple = [0, 0, time_parts[0]]
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3_600 + minutes * 60 + seconds
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = total_secs // 3_600, (total_secs % 3_600) // 60, total_secs % 60
return F'''{int(UpperCamelCase__ )}h{int(UpperCamelCase__ )}m{int(UpperCamelCase__ )}s'''
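    # Worked example (added for clarity, not in the original file): an entry
    # "0:01:30" splits on ":" into hours=0, minutes=1, seconds=30.0, i.e. 90
    # total seconds, which the format string renders as "0h1m30s".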
@property
def lowerCamelCase_ ( self: Dict ):
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def lowerCamelCase_ ( self: Any ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F'''🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.''',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
def lowerCamelCase_ ( self: Any ):
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F'''There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'''
F''' {self.time}.'''
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
@property
def lowerCamelCase_ ( self: Optional[Any] ):
lowerCamelCase__ : Any = 40
lowerCamelCase__ : List[str] = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(UpperCamelCase__ , UpperCamelCase__ )}
lowerCamelCase__ : List[Any] = """"""
for category, failures in category_failures.items():
if len(UpperCamelCase__ ) == 0:
continue
if report != "":
report += "\n\n"
report += F'''*{category} failures*:'''.ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(UpperCamelCase__ )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F'''The following examples had failures:\n\n\n{report}\n''',
},
}
@property
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : Union[str, Any] = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(UpperCamelCase__ )
@staticmethod
def lowerCamelCase_ ( ):
lowerCamelCase__ : List[Any] = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": F'''https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}''',
},
}
]
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(UpperCamelCase__ )} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=UpperCamelCase__ , )
def lowerCamelCase_ ( self: Any ):
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(self.payload )} ) )
lowerCamelCase__ : Any = F'''{self.n_failures} failures out of {self.n_tests} tests,''' if self.n_failures else """All tests passed."""
lowerCamelCase__ : List[str] = client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=UpperCamelCase__ , )
def lowerCamelCase_ ( self: Any , UpperCamelCase__: str , UpperCamelCase__: Dict , UpperCamelCase__: List[Any] , UpperCamelCase__: Union[str, Any] ):
lowerCamelCase__ : Optional[Any] = """"""
for key, value in failures.items():
lowerCamelCase__ : int = value[:200] + """ [Truncated]""" if len(UpperCamelCase__ ) > 250 else value
failures_text += F'''*{key}*\n_{value}_\n\n'''
lowerCamelCase__ : Tuple = job_name
lowerCamelCase__ : Union[str, Any] = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
if job_link is not None:
lowerCamelCase__ : Union[str, Any] = {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
"""url""": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def lowerCamelCase_ ( self: Tuple ):
if self.thread_ts is None:
raise ValueError("""Can only post reply if a post has been made.""" )
lowerCamelCase__ : int = self.doc_test_results.pop("""job_link""" )
self.doc_test_results.pop("""failures""" )
self.doc_test_results.pop("""success""" )
self.doc_test_results.pop("""time_spent""" )
lowerCamelCase__ : List[Any] = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result["""failures"""] ):
lowerCamelCase__ : Union[str, Any] = F'''*Num failures* :{len(job_result['failed'] )} \n'''
lowerCamelCase__ : Union[str, Any] = job_result["""failures"""]
lowerCamelCase__ : int = self.get_reply_blocks(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , text=UpperCamelCase__ )
print("""Sending the following reply""" )
print(json.dumps({"""blocks""": blocks} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=F'''Results for {job}''' , blocks=UpperCamelCase__ , thread_ts=self.thread_ts["""ts"""] , )
time.sleep(1 )
def SCREAMING_SNAKE_CASE_ () -> Tuple:
lowerCamelCase__ : Any = os.environ["""GITHUB_RUN_ID"""]
lowerCamelCase__ : List[Any] = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100'''
lowerCamelCase__ : Optional[int] = requests.get(UpperCamelCase ).json()
lowerCamelCase__ : List[Any] = {}
try:
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
lowerCamelCase__ : Any = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(UpperCamelCase ):
lowerCamelCase__ : List[Any] = requests.get(url + f'''&page={i + 2}''' ).json()
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return jobs
except Exception as e:
print("""Unknown error, could not fetch links.""" , UpperCamelCase )
return {}
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Any:
lowerCamelCase__ : int = {}
if os.path.exists(UpperCamelCase ):
lowerCamelCase__ : List[str] = os.listdir(UpperCamelCase )
for file in files:
try:
with open(os.path.join(UpperCamelCase , UpperCamelCase ) , encoding="""utf-8""" ) as f:
lowerCamelCase__ : List[Any] = f.read()
except UnicodeDecodeError as e:
raise ValueError(f'''Could not open {os.path.join(UpperCamelCase , UpperCamelCase )}.''' ) from e
return _artifact
def SCREAMING_SNAKE_CASE_ () -> Optional[Any]:
class _lowercase :
def __init__( self: Tuple , UpperCamelCase__: str ):
lowerCamelCase__ : Any = name
lowerCamelCase__ : Union[str, Any] = []
def __str__( self: int ):
return self.name
def lowerCamelCase_ ( self: Tuple , UpperCamelCase__: str ):
self.paths.append({"""name""": self.name, """path""": path} )
lowerCamelCase__ : Dict[str, Artifact] = {}
lowerCamelCase__ : List[str] = filter(os.path.isdir , os.listdir() )
for directory in directories:
lowerCamelCase__ : Union[str, Any] = directory
if artifact_name not in _available_artifacts:
lowerCamelCase__ : Optional[int] = Artifact(UpperCamelCase )
_available_artifacts[artifact_name].add_path(UpperCamelCase )
return _available_artifacts
if __name__ == "__main__":
_A : Any =get_job_links()
_A : str =retrieve_available_artifacts()
_A : int =collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
    doc_test_results['''job_link'''] = github_actions_job_links.get('''run_doctests''')
    artifact_path = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
    artifact = retrieve_artifact(artifact_path['''name'''])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact['''stats'''])
        doc_test_results['''failures'''] = failed
        doc_test_results['''success'''] = success
        doc_test_results['''time_spent'''] = time_spent[1:-1] + ''', '''
        all_failures = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
                line = line.replace('''FAILED ''', '''''')
                line = line.split()[0].replace('''\n''', '''''')
                if "::" in line:
                    file_path, test = line.split('''::''')
                else:
                    file_path, test = line, line
                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else '''N/A'''
                        doc_test_results[category]["failures"][test] = failure
                        break
    message = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 129
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_mask2former": [
"MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Mask2FormerConfig",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["Mask2FormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mask2former"] = [
"MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"Mask2FormerForUniversalSegmentation",
"Mask2FormerModel",
"Mask2FormerPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
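# Note (hedged): the `_LazyModule` pattern above defers the heavy torch/vision imports
# until an attribute is first accessed, so importing the package itself stays cheap;
# e.g. accessing `Mask2FormerConfig` only materialises the configuration submodule.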
| 251
|
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",\n author = "Lin, Chin-Yew and\n Och, Franz Josef",\n booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",\n month = "aug 23{--}aug 27",\n year = "2004",\n address = "Geneva, Switzerland",\n publisher = "COLING",\n url = "https://www.aclweb.org/anthology/C04-1072",\n pages = "501--507",\n}\n'
_DESCRIPTION = '\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,\nthe better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n'
_KWARGS_DESCRIPTION = '\nComputes BLEU score of translated segments against one or more references.\nArgs:\n    predictions: list of translations to score.\n        Each translation should be tokenized into a list of tokens.\n    references: list of lists of references for each translation.\n        Each reference should be tokenized into a list of tokens.\n    max_order: Maximum n-gram order to use when computing BLEU score.\n    smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n    \'bleu\': bleu score,\n    \'precisions\': geometric mean of n-gram precisions,\n    \'brevity_penalty\': brevity penalty,\n    \'length_ratio\': ratio of lengths,\n    \'translation_length\': translation_length,\n    \'reference_length\': reference_length\nExamples:\n\n    >>> predictions = [\n    ...     ["hello", "there", "general", "kenobi"],  # tokenized prediction of the first sample\n    ...     ["foo", "bar", "foobar"]  # tokenized prediction of the second sample\n    ... ]\n    >>> references = [\n    ...     [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]],  # tokenized references for the first sample (2 references)\n    ...     [["foo", "bar", "foobar"]]  # tokenized references for the second sample (1 reference)\n    ... ]\n    >>> bleu = datasets.load_metric("bleu")\n    >>> results = bleu.compute(predictions=predictions, references=references)\n    >>> print(results["bleu"])\n    1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
    def _compute( self , predictions , references , max_order=4 , smooth=False ):
        """simple docstring"""
        score = compute_bleu(
            reference_corpus=references , translation_corpus=predictions , max_order=max_order , smooth=smooth )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 346
| 0
|
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=2 , seq_length=56 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=2 , intermediate_size=7 , hidden_act="gelu_new" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , attention_type="block_sparse" , use_bias=True , rescale_embeddings=False , block_size=2 , num_random_blocks=3 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'attention_mask': attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest ( FlaxModelTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
FlaxBigBirdForCausalLM,
FlaxBigBirdModel,
FlaxBigBirdForPreTraining,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
)
if is_flax_available()
else ()
)
    test_attn_probs = False
    test_mismatched_shapes = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = FlaxBigBirdModelTester(self )
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained( self ):
"""simple docstring"""
super().test_from_pretrained_save_pretrained()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init( self ):
"""simple docstring"""
super().test_from_pretrained_with_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init( self ):
"""simple docstring"""
super().test_no_automatic_init()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output( self ):
"""simple docstring"""
super().test_hidden_states_output()
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('google/bigbird-roberta-base' )
            self.assertIsNotNone(model )
    def test_attention_outputs( self ):
"""simple docstring"""
if self.test_attn_probs:
super().test_attention_outputs()
@slow
# copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation( self ):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted(input_ids , attention_mask=None , **kwargs ):
                    return model(input_ids=input_ids , attention_mask=attention_mask , **kwargs )

                with self.subTest('JIT Enabled' ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()

                with self.subTest('JIT Disabled' ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()

                self.assertEqual(len(jitted_outputs ) , len(outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
    def check_pt_flax_outputs( self , fx_outputs , pt_outputs , model_class , tol=1e-5 , name="outputs" , attributes=None ):
        """simple docstring"""
        # attention probs are None in the Flax block-sparse implementation, so skip comparing them
        if name.startswith('outputs.attentions' ):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs , pt_outputs , model_class , tol , name , attributes )
| 367
|
from __future__ import annotations
import numpy as np
def relu(vector: list[float] ):
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
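    # Since np.maximum broadcasts elementwise, relu also works unchanged on
    # n-dimensional inputs, e.g.:
    # print(relu(np.array([[-2.0, 3.0], [0.5, -0.1]])))  # --> [[0. 3.] [0.5 0.]]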
| 50
| 0
|
def is_palindrome(head ):
    '''simple docstring'''
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # cut the link between the two halves (works without, but is cleaner)
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head ):
    '''simple docstring'''
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head ):
    '''simple docstring'''
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
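# The three functions above assume a minimal singly linked node type; the class
# below is a hypothetical sketch (any object exposing `val` and `next` works):
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next


# Example usage: 1 -> 2 -> 2 -> 1 is a palindrome
# head = ListNode(1, ListNode(2, ListNode(2, ListNode(1))))
# assert is_palindrome_stack(head) is True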
| 29
|
def is_palindrome(head ):
    '''simple docstring'''
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # cut the link between the two halves (works without, but is cleaner)
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head ):
    '''simple docstring'''
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val )
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head ):
    '''simple docstring'''
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos )
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v ) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0 , len(v ) ):
                if v[i] + v[len(v ) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
| 29
| 1
|
from __future__ import annotations
import math
def prime_sieve(num: int ) -> list[int]:
    '''simple docstring'''
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg )
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num ) )
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start )
            # Set multiples of start be False
            for i in range(start * start , num + 1 , start ):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1 , num + 1 ):
        if sieve[j] is True:
            prime.append(j )
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
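    # For reference: prime_sieve(10) returns [2, 3, 5, 7]. The sieve only marks
    # multiples of primes up to sqrt(num), so it runs in O(n log log n) time with
    # O(n) memory; the final loop collects primes above sqrt(num).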
| 352
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class __snake_case (unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
            vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        image_processor_map = {
            """do_resize""": True,
            """size""": {"""height""": 18, """width""": 18},
            """do_normalize""": True,
            """image_mean""": [0.5, 0.5, 0.5],
            """image_std""": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        return BertTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_image_processor( self , **kwargs ):
        '''simple docstring'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )

    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs( self ):
        '''simple docstring'''
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ):
        '''simple docstring'''
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        processor.save_pretrained(self.tmpdirname )
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )

    def test_save_load_pretrained_additional_features( self ):
        '''simple docstring'''
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ViTImageProcessor )

    def test_image_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="""np""" )
        input_processor = processor(images=image_input , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def test_tokenizer( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """lower newer"""
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def test_processor( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
        # test if it raises when no input is passed
        with self.assertRaises(ValueError ):
            processor()

    def test_tokenizer_decode( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )

    def test_model_input_names( self ):
        '''simple docstring'''
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = """lower newer"""
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 159
| 0
|
import copy
import re
class TrialShortNamer:
    '''simple docstring'''
    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls , prefix , defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()

    @staticmethod
    def shortname_for_word(info , word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(F"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1 , len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break
        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 1_0) + s
                    integer //= 1_0
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break
        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word

    @staticmethod
    def shortname_for_key(info , param_name):
        words = param_name.split("_")
        shortname_parts = [TrialShortNamer.shortname_for_word(info , word) for word in words]
        # We try to create a separatorless short name, but if there is a collision we have to fallback
        # to a separated short name
        separators = ["", "_"]
        for separator in separators:
            shortname = separator.join(shortname_parts)
            if shortname not in info["reverse_short_param"]:
                info["short_param"][param_name] = shortname
                info["reverse_short_param"][shortname] = param_name
                return shortname
        return param_name

    @staticmethod
    def add_new_param_name(info , param_name):
        short_name = TrialShortNamer.shortname_for_key(info , param_name)
        info["short_param"][param_name] = short_name
        info["reverse_short_param"][short_name] = param_name

    @classmethod
    def build_naming_info(cls):
        if cls.NAMING_INFO is not None:
            return
        info = {
            "short_word": {},
            "reverse_short_word": {},
            "short_param": {},
            "reverse_short_param": {},
        }
        field_keys = list(cls.DEFAULTS.keys())
        for k in field_keys:
            cls.add_new_param_name(info , k)
        cls.NAMING_INFO = info

    @classmethod
    def shortname(cls , params):
        cls.build_naming_info()
        assert cls.PREFIX is not None
        name = [copy.copy(cls.PREFIX)]
        for k, v in params.items():
            if k not in cls.DEFAULTS:
                raise Exception(F"You should provide a default value for the param name {k} with value {v}")
            if v == cls.DEFAULTS[k]:
                # The default value is not added to the name
                continue
            key = cls.NAMING_INFO["short_param"][k]
            if isinstance(v , bool):
                v = 1 if v else 0
            sep = "" if isinstance(v , (int, float)) else "-"
            e = F"{key}{sep}{v}"
            name.append(e)
        return "_".join(name)

    @classmethod
    def parse_repr(cls , repr):
        repr = repr[len(cls.PREFIX) + 1 :]
        if repr == "":
            values = []
        else:
            values = repr.split("_")
        parameters = {}
        for value in values:
            if "-" in value:
                p_k, p_v = value.split("-")
            else:
                p_k = re.sub("[0-9.]" , "" , value)
                p_v = float(re.sub("[^0-9.]" , "" , value))
            key = cls.NAMING_INFO["reverse_short_param"][p_k]
            parameters[key] = p_v
        for k in cls.DEFAULTS:
            if k not in parameters:
                parameters[k] = cls.DEFAULTS[k]
        return parameters
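# Hedged usage sketch (the subclass and its defaults below are assumptions, not
# part of this file): a concrete namer declares PREFIX and DEFAULTS, then maps
# hyperparameter dicts to short run names and back.
# class RunNamer(TrialShortNamer):
#     PREFIX = "run"
#     DEFAULTS = {"learning_rate": 1e-3, "per_device_batch_size": 32}
# RunNamer.shortname({"learning_rate": 1e-3, "per_device_batch_size": 64})
# # -> e.g. "run_pdbs64" (the default learning_rate is omitted from the name)
# RunNamer.parse_repr("run_pdbs64")  # inverts the mapping; values come back as floats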
| 11
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_clap'''] = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
    _import_structure['''feature_extraction_clap'''] = ['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 162
| 0
|
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter ( TensorFormatter[Mapping, 'torch.Tensor', Mapping] ):
    '''simple docstring'''
    def __init__(self , features=None , **torch_tensor_kwargs ):
        '''simple docstring'''
        super().__init__(features=features )
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self , column ):
        '''simple docstring'''
        import torch

        if isinstance(column , list ) and column:
            if all(
                isinstance(x , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column ):
                return torch.stack(column )
        return column

    def _tensorize(self , value ):
        '''simple docstring'''
        import torch

        if isinstance(value , (str, bytes, type(None )) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            default_dtype = {'''dtype''': torch.int64}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {'''dtype''': torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        return torch.tensor(value , **{**default_dtype, **self.torch_tensor_kwargs} )

    def _recursive_tensorize(self , data_struct ):
        '''simple docstring'''
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct , '''__array__''' ) and not isinstance(data_struct , torch.Tensor ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )

    def recursive_tensorize(self , data_struct ):
        '''simple docstring'''
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )

    def format_row(self , pa_table ):
        '''simple docstring'''
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )

    def format_column(self , pa_table ):
        '''simple docstring'''
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column

    def format_batch(self , pa_table ):
        '''simple docstring'''
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
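# Context (hedged): a formatter like this is the backend behind
# `datasets.Dataset.set_format("torch")`; once the format is set, indexing the
# dataset returns torch tensors instead of Python objects, e.g.
# ds.set_format("torch")
# batch = ds[:2]  # Mapping of column name -> torch.Tensor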
| 24
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig ( datasets.BuilderConfig ):
    '''simple docstring'''
    batch_size: int = 10000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet ( datasets.ArrowBasedBuilder ):
    '''simple docstring'''
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self ):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features )

    def _split_generators(self , dl_manager ):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files ):
                    with open(file , '''rb''' ) as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
                    break
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'''files''': files} ) )
        return splits

    def _cast_table(self , pa_table ):
        '''simple docstring'''
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.info.features.arrow_schema )
        return pa_table

    def _generate_tables(self , files ):
        '''simple docstring'''
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , '''rb''' ) as f:
                parquet_file = pq.ParquetFile(f )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        pa_table = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"""{file_idx}_{batch_idx}""", self._cast_table(pa_table )
                except ValueError as e:
                    logger.error(f"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                    raise
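# Context (hedged): this is the builder that `datasets.load_dataset("parquet",
# data_files=...)` dispatches to; config fields such as `batch_size` and
# `columns` can be passed through as keyword arguments of `load_dataset`.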
| 24
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta'] = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlm_roberta_fast'] = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlm_roberta'] = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlm_roberta'] = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xlm_roberta'] = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 96
|
import math
def proth(number: int) -> int:
    '''simple docstring'''
    if not isinstance(number , int):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 1:
        msg = f"""Input value of [number={number}] must be > 0"""
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        block_index = int(math.log(number // 3 , 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
    return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(1_1):
        value = 0
        try:
            value = proth(number)
except ValueError:
print(f"""ValueError: there is no {number}th Proth number""")
continue
print(f"""The {number}th Proth number: {value}""")
| 129
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput( BaseOutput ):
    sample: torch.FloatTensor
class TransformerTemporalModel( ModelMixin , ConfigMixin ):
    @register_to_config
    def __init__( self , num_attention_heads = 16 , attention_head_dim = 88 , in_channels = None , out_channels = None , num_layers = 1 , dropout = 0.0 , norm_num_groups = 32 , cross_attention_dim = None , attention_bias = False , sample_size = None , activation_fn = "geglu" , norm_elementwise_affine = True , double_self_attention = True , ):
        '''simple docstring'''
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups , num_channels=in_channels , eps=1e-6 , affine=True)
        self.proj_in = nn.Linear(in_channels , inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim , num_attention_heads , attention_head_dim , dropout=dropout , cross_attention_dim=cross_attention_dim , activation_fn=activation_fn , attention_bias=attention_bias , double_self_attention=double_self_attention , norm_elementwise_affine=norm_elementwise_affine , )
                for d in range(num_layers)
            ])
        self.proj_out = nn.Linear(inner_dim , in_channels)

    def forward( self , hidden_states , encoder_hidden_states=None , timestep=None , class_labels=None , num_frames=1 , cross_attention_kwargs=None , return_dict = True , ):
        '''simple docstring'''
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size , num_frames , channel , height , width)
        hidden_states = hidden_states.permute(0 , 2 , 1 , 3 , 4)
        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0 , 3 , 4 , 2 , 1).reshape(batch_size * height * width , num_frames , channel)
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states , encoder_hidden_states=encoder_hidden_states , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , class_labels=class_labels , )
        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size , height , width , num_frames , channel)
            .permute(0 , 3 , 4 , 1 , 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames , channel , height , width)
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
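# Shape walkthrough (as implemented above): the input video batch arrives flattened
# as (batch * num_frames, channel, height, width); the model regroups it so that
# attention runs across the num_frames axis independently for each spatial location,
# then restores the original flattened layout before the residual addition.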
| 100
|
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        '''simple docstring'''
        return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels):
        '''simple docstring'''
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states=None , encoder_attention_mask=None , ):
        '''simple docstring'''
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=input_ids)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_biogpt_model_attention_mask_past(self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args):
        '''simple docstring'''
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        # create attention mask
        attn_mask = torch.ones(input_ids.shape , dtype=torch.long , device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0
        # first forward pass
        output, past_key_values = model(input_ids , attention_mask=attn_mask).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size)
        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,) , half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1) , config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=torch_device)] , dim=1 , )
        # get two different outputs
        output_from_no_past = model(next_input_ids , attention_mask=attn_mask)['''last_hidden_state''']
        output_from_past = model(next_tokens , past_key_values=past_key_values , attention_mask=attn_mask)['''last_hidden_state''']
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3))
    def create_and_check_biogpt_model_past_large_inputs(self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args):
        '''simple docstring'''
        model = BioGptModel(config=config).to(torch_device).eval()
        attention_mask = torch.ones(input_ids.shape , dtype=torch.long , device=torch_device)
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3) , 2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask] , dim=-1)
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask)['''last_hidden_state''']
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values)[
            '''last_hidden_state'''
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-3))
    def create_and_check_forward_and_backwards(self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args , gradient_checkpointing=False):
        '''simple docstring'''
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        result = model(input_ids , labels=input_ids)
        self.parent.assertEqual(result.loss.shape , ())
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()
    def create_and_check_biogpt_weight_initialization(self , config , *args):
        '''simple docstring'''
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std) , 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0) , 0.01)
    def create_and_check_biogpt_for_token_classification(self , config , input_ids , token_type_ids , input_mask , sequence_labels , *args):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class BioGptModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_pruning = False
    def setUp(self):
        '''simple docstring'''
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self , config_class=BioGptConfig , hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs , gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)
@slow
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
_UpperCamelCase = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(__a)
_UpperCamelCase = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
_UpperCamelCase = '''left'''
# Define PAD Token = EOS Token (BioGPT has no dedicated padding token)
_UpperCamelCase = tokenizer.eos_token
_UpperCamelCase = model.config.eos_token_id
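# (Left padding matters here: a decoder-only model continues generation from the
# last position of each row, so shorter prompts in a batch must be padded on the
# left; with right padding the model would continue from pad tokens instead of
# the prompt.)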
# use different length sentences to test batching
_UpperCamelCase = [
'''Hello, my dog is a little''',
'''Today, I''',
]
_UpperCamelCase = tokenizer(__a , return_tensors='''pt''' , padding=__a)
_UpperCamelCase = inputs['''input_ids'''].to(__a)
_UpperCamelCase = model.generate(
input_ids=__a , attention_mask=inputs['''attention_mask'''].to(__a) , )
_UpperCamelCase = tokenizer(sentences[0] , return_tensors='''pt''').input_ids.to(__a)
_UpperCamelCase = model.generate(input_ids=__a)
_UpperCamelCase = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
_UpperCamelCase = tokenizer(sentences[1] , return_tensors='''pt''').input_ids.to(__a)
_UpperCamelCase = model.generate(input_ids=__a , max_length=model.config.max_length - num_paddings)
_UpperCamelCase = tokenizer.batch_decode(__a , skip_special_tokens=__a)
_UpperCamelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=__a)
_UpperCamelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=__a)
_UpperCamelCase = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(__a , __a)
self.assertListEqual(__a , [non_padded_sentence, padded_sentence])
@slow
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = BioGptModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = 3
_UpperCamelCase = input_dict['''input_ids''']
_UpperCamelCase = input_ids.ne(1).to(__a)
_UpperCamelCase = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size)
_UpperCamelCase = BioGptForSequenceClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , labels=__a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
def UpperCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase = 3
_UpperCamelCase = '''multi_label_classification'''
_UpperCamelCase = input_dict['''input_ids''']
_UpperCamelCase = input_ids.ne(1).to(__a)
_UpperCamelCase = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size).to(torch.float)
_UpperCamelCase = BioGptForSequenceClassification(__a)
model.to(__a)
model.eval()
_UpperCamelCase = model(__a , attention_mask=__a , labels=__a)
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels))
@require_torch
class _UpperCAmelCase( unittest.TestCase ):
@slow
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCamelCase = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
_UpperCamelCase = torch.tensor([[2, 48_05, 9, 6_56, 21]])
_UpperCamelCase = model(__a)[0]
_UpperCamelCase = 4_23_84
_UpperCamelCase = torch.Size((1, 5, vocab_size))
self.assertEqual(output.shape , __a)
_UpperCamelCase = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]])
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4))
@slow
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = BioGptTokenizer.from_pretrained('''microsoft/biogpt''')
_UpperCamelCase = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''')
model.to(__a)
torch.manual_seed(0)
_UpperCamelCase = tokenizer('''COVID-19 is''' , return_tensors='''pt''').to(__a)
_UpperCamelCase = model.generate(
**__a , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=__a , )
_UpperCamelCase = tokenizer.decode(output_ids[0] , skip_special_tokens=__a)
_UpperCamelCase = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(__a , __a)
| 100
| 1
|
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
_snake_case = 16
_snake_case = 32
def lowerCAmelCase_ ( snake_case_,snake_case_ = 16 ):
_A : List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" )
_A : int = load_dataset("""glue""","""mrpc""" )
def tokenize_function(snake_case_ ):
# max_length=None => use the model max length (it's actually the default)
_A : Optional[Any] = tokenizer(examples["""sentence1"""],examples["""sentence2"""],truncation=_UpperCAmelCase,max_length=_UpperCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_A : List[str] = datasets.map(
_UpperCAmelCase,batched=_UpperCAmelCase,remove_columns=["""idx""", """sentence1""", """sentence2"""],)
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_A : Tuple = tokenized_datasets.rename_column("""label""","""labels""" )
def collate_fn(snake_case_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_A : Union[str, Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_A : Dict = 16
elif accelerator.mixed_precision != "no":
_A : List[Any] = 8
else:
_A : Optional[int] = None
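# (Padding to multiples of 8 for fp16/bf16, or 16 for fp8, aligns sequence
# lengths with tensor-core tile sizes so the padded matmuls stay on the fast path.)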
return tokenizer.pad(
_UpperCAmelCase,padding="""longest""",max_length=_UpperCAmelCase,pad_to_multiple_of=_UpperCAmelCase,return_tensors="""pt""",)
# Instantiate dataloaders.
_A : Tuple = DataLoader(
tokenized_datasets["""train"""],shuffle=_UpperCAmelCase,collate_fn=_UpperCAmelCase,batch_size=_UpperCAmelCase )
_A : Any = DataLoader(
tokenized_datasets["""validation"""],shuffle=_UpperCAmelCase,collate_fn=_UpperCAmelCase,batch_size=_UpperCAmelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
_snake_case = mocked_dataloaders # noqa: F811
def lowerCAmelCase_ ( snake_case_,snake_case_ ):
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""",_UpperCAmelCase ) == "1":
_A : Any = 2
# Initialize accelerator
_A : str = Accelerator(cpu=args.cpu,mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_A : str = config['lr']
_A : Dict = int(config["""num_epochs"""] )
_A : int = int(config["""seed"""] )
_A : int = int(config["""batch_size"""] )
_A : str = evaluate.load("""glue""","""mrpc""" )
# New Code #
# We can now define an inner training loop function. It should take a batch size as its only parameter,
# and build the dataloaders in there.
# It also gets our decorator
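# (A rough sketch of what the decorator does, simplified and not its actual
# implementation: retry the wrapped function, halving the batch size whenever an
# out-of-memory error escapes it.
#
#     def find_executable_batch_size(starting_batch_size=128):
#         def decorator(fn):
#             def wrapper(*args, **kwargs):
#                 batch_size = starting_batch_size
#                 while batch_size > 0:
#                     try:
#                         return fn(batch_size, *args, **kwargs)
#                     except RuntimeError:  # e.g. CUDA out of memory
#                         batch_size //= 2
#             return wrapper
#         return decorator
# )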
@find_executable_batch_size(starting_batch_size=_UpperCAmelCase )
def inner_training_loop(snake_case_ ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(_UpperCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_A : Tuple = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""",return_dict=_UpperCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_A : List[Any] = model.to(accelerator.device )
# Instantiate optimizer
_A : List[Any] = AdamW(params=model.parameters(),lr=_UpperCAmelCase )
_A : Tuple = get_dataloaders(_UpperCAmelCase,_UpperCAmelCase )
# Instantiate scheduler
_A : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=_UpperCAmelCase,num_warmup_steps=100,num_training_steps=(len(_UpperCAmelCase ) * num_epochs),)
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_A : Optional[Any] = accelerator.prepare(
_UpperCAmelCase,_UpperCAmelCase,_UpperCAmelCase,_UpperCAmelCase,_UpperCAmelCase )
# Now we train the model
for epoch in range(_UpperCAmelCase ):
model.train()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_A : List[str] = model(**_UpperCAmelCase )
_A : Union[str, Any] = outputs.loss
accelerator.backward(_UpperCAmelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(_UpperCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_A : List[str] = model(**_UpperCAmelCase )
_A : Union[str, Any] = outputs.logits.argmax(dim=-1 )
_A : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=_UpperCAmelCase,references=_UpperCAmelCase,)
_A : Tuple = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''',_UpperCAmelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def lowerCAmelCase_ ( ):
_A : Dict = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""",type=_UpperCAmelCase,default=_UpperCAmelCase,choices=["""no""", """fp16""", """bf16""", """fp8"""],help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""",)
parser.add_argument("""--cpu""",action="""store_true""",help="""If passed, will train on the CPU.""" )
_A : Union[str, Any] = parser.parse_args()
_A : int = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(_UpperCAmelCase,_UpperCAmelCase )
if __name__ == "__main__":
main()
| 26
|
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
_UpperCAmelCase : str = pytest.mark.integration
@require_faiss
class lowerCAmelCase ( __UpperCamelCase ):
def _create_dummy_dataset(self) -> Dataset:
dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x) for x in np.arange(30).tolist()]})
return dset
def A_ ( self : Optional[Any] ) -> Optional[int]:
import faiss
lowerCamelCase__ : Dataset = self._create_dummy_dataset()
lowerCamelCase__ : List[Any] = dset.map(
lambda UpperCAmelCase , UpperCAmelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=UpperCAmelCase , keep_in_memory=UpperCAmelCase )
lowerCamelCase__ : Tuple = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase__ , lowerCamelCase__ : str = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def A_ ( self : Union[str, Any] ) -> int:
import faiss
lowerCamelCase__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def A_ ( self : List[str] ) -> Tuple:
import faiss
lowerCamelCase__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=UpperCAmelCase ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase__ , lowerCamelCase__ : str = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def A_ ( self : Any ) -> Optional[Any]:
lowerCamelCase__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(UpperCAmelCase , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def A_ ( self : Dict ) -> Dict:
from elasticsearch import Elasticsearch
lowerCamelCase__ : Dataset = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCamelCase__ : List[Any] = {'acknowledged': True}
mocked_bulk.return_value = [(True, None)] * 30
lowerCamelCase__ : int = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
lowerCamelCase__ : List[str] = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=UpperCAmelCase )
lowerCamelCase__ , lowerCamelCase__ : Dict = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class lowerCAmelCase ( __UpperCamelCase ):
def A_ ( self : Any ) -> Dict:
import faiss
lowerCamelCase__ : Tuple = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCamelCase__ : int = np.zeros(5 , dtype=np.floataa )
lowerCamelCase__ : Any = 1
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = index.search(UpperCAmelCase )
self.assertRaises(UpperCAmelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCamelCase__ : str = np.eye(5 , dtype=np.floataa )[::-1]
lowerCamelCase__ , lowerCamelCase__ : Dict = index.search_batch(UpperCAmelCase )
self.assertRaises(UpperCAmelCase , index.search_batch , queries[0] )
lowerCamelCase__ : str = [scores[0] for scores in total_scores]
lowerCamelCase__ : List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , UpperCAmelCase )
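# (For reference, a sketch of the equivalent raw faiss calls; this is not part
# of the test suite:
#     index = faiss.IndexFlatIP(5)                # inner-product metric
#     index.add(np.eye(5, dtype=np.float32))      # vectors must be float32, shape (n, d)
#     scores, ids = index.search(queries, 1)      # queries: 2D array of shape (n, 5)
# )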
def A_ ( self : List[Any] ) -> List[Any]:
import faiss
lowerCamelCase__ : Any = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCamelCase__ : Tuple = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(UpperCAmelCase ):
lowerCamelCase__ : List[str] = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def A_ ( self : List[str] ) -> Optional[int]:
import faiss
lowerCamelCase__ : Optional[Any] = faiss.IndexFlat(5 )
lowerCamelCase__ : int = FaissIndex(custom_index=UpperCAmelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def A_ ( self : Any ) -> Optional[int]:
import faiss
lowerCamelCase__ : Any = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=UpperCAmelCase ) as tmp_file:
index.save(tmp_file.name )
lowerCamelCase__ : List[Any] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase__ : List[str] = np.zeros(5 , dtype=np.floataa )
lowerCamelCase__ : Tuple = 1
lowerCamelCase__ , lowerCamelCase__ : str = index.search(UpperCAmelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Any:
import faiss
lowerCamelCase__ : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCamelCase__ : Optional[int] = 'index.faiss'
lowerCamelCase__ : Optional[Any] = F"""mock://{index_name}"""
index.save(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCamelCase__ : Tuple = FaissIndex.load(_UpperCAmelCase , storage_options=mockfs.storage_options )
lowerCamelCase__ : Optional[int] = np.zeros(5 , dtype=np.floataa )
lowerCamelCase__ : Dict = 1
lowerCamelCase__ , lowerCamelCase__ : str = index.search(_UpperCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class lowerCAmelCase ( __UpperCamelCase ):
def A_ ( self : Dict ) -> List[Any]:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCamelCase__ : Any = Elasticsearch()
lowerCamelCase__ : Tuple = {'acknowledged': True}
lowerCamelCase__ : Tuple = ElasticSearchIndex(es_client=UpperCAmelCase )
mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
lowerCamelCase__ : Optional[int] = 'foo'
lowerCamelCase__ : str = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = index.search(UpperCAmelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCamelCase__ : Any = 'foo'
lowerCamelCase__ : List[str] = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCamelCase__ , lowerCamelCase__ : Tuple = index.search(UpperCAmelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCamelCase__ : List[str] = ['foo', 'bar', 'foobar']
lowerCamelCase__ : str = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCamelCase__ , lowerCamelCase__ : str = index.search_batch(UpperCAmelCase )
lowerCamelCase__ : List[str] = [scores[0] for scores in total_scores]
lowerCamelCase__ : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , UpperCAmelCase )
# batched queries with timeout
lowerCamelCase__ : str = ['foo', 'bar', 'foobar']
lowerCamelCase__ : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = index.search_batch(UpperCAmelCase , request_timeout=30 )
lowerCamelCase__ : Optional[Any] = [scores[0] for scores in total_scores]
lowerCamelCase__ : Dict = [indices[0] for indices in total_indices]
self.assertGreater(np.min(UpperCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , UpperCAmelCase )
| 50
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__lowerCAmelCase = logging.get_logger(__name__)
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
def __init__( self : List[Any] ,_a : int ,_a : int ,_a : float ,**_a : List[str] ):
'''simple docstring'''
_a : Union[str, Any] = feature_size
_a : Union[str, Any] = sampling_rate
_a : Optional[Any] = padding_value
_a : Tuple = kwargs.pop('padding_side' ,'right' )
_a : List[Any] = kwargs.pop('return_attention_mask' ,_a )
super().__init__(**_a )
def __lowercase ( self : int ,_a : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] ,_a : Union[bool, str, PaddingStrategy] = True ,_a : Optional[int] = None ,_a : bool = False ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[Union[str, TensorType]] = None ,):
'''simple docstring'''
if isinstance(_a ,(list, tuple) ) and isinstance(processed_features[0] ,(dict, BatchFeature) ):
_a : List[Any] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
F""" to this method that includes {self.model_input_names[0]}, but you provided"""
F""" {list(processed_features.keys() )}""" )
_a : Any = processed_features[self.model_input_names[0]]
_a : List[str] = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(_a ) == 0:
if return_attention_mask:
_a : Tuple = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
_a : List[Any] = required_input[0]
if isinstance(_a ,(list, tuple) ):
# first_element might be an empty list/tuple in some edge cases, so we grab the first non-empty element.
_a : Optional[int] = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(_a ):
_a : str = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(_a ):
_a : Any = 'tf'
elif is_torch_tensor(_a ):
_a : int = 'pt'
elif isinstance(_a ,(int, float, list, tuple, np.ndarray) ):
_a : Tuple = 'np'
else:
raise ValueError(
F"""type of {first_element} unknown: {type(_a )}. """
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] ,(int, float) ):
_a : Optional[Any] = to_numpy(_a )
else:
_a : int = [to_numpy(_a ) for v in value]
# Convert padding_strategy in PaddingStrategy
_a : Dict = self._get_padding_strategies(padding=_a ,max_length=_a )
_a : Optional[Any] = processed_features[self.model_input_names[0]]
_a : Optional[Any] = len(_a )
if not all(len(_a ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
_a : str = []
for i in range(_a ):
_a : Any = {k: v[i] for k, v in processed_features.items()}
# truncation
_a : List[Any] = self._truncate(
_a ,max_length=_a ,pad_to_multiple_of=_a ,truncation=_a ,)
truncated_inputs.append(_a )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
_a : Dict = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
_a : Union[str, Any] = PaddingStrategy.MAX_LENGTH
_a : List[str] = {}
for i in range(_a ):
# padding
_a : Optional[Any] = self._pad(
truncated_inputs[i] ,max_length=_a ,padding_strategy=_a ,pad_to_multiple_of=_a ,return_attention_mask=_a ,)
for key, value in outputs.items():
if key not in batch_outputs:
_a : List[str] = []
if value.dtype is np.dtype(np.floataa ):
_a : Any = value.astype(np.floataa )
batch_outputs[key].append(_a )
return BatchFeature(_a ,tensor_type=_a )
def __lowercase ( self : Optional[Any] ,_a : Union[Dict[str, np.ndarray], BatchFeature] ,_a : Optional[int] = None ,_a : PaddingStrategy = PaddingStrategy.DO_NOT_PAD ,_a : Optional[int] = None ,_a : Optional[bool] = None ,):
'''simple docstring'''
_a : Optional[int] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
_a : int = len(_a )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_a : Any = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_a : str = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(_a ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
_a : List[str] = np.ones(len(_a ) ,dtype=np.intaa )
if needs_to_be_padded:
_a : Any = max_length - len(_a )
if self.padding_side == "right":
if return_attention_mask:
_a : Optional[int] = np.pad(
processed_features['attention_mask'] ,(0, difference) )
_a : int = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
_a : Dict = np.pad(
_a ,_a ,'constant' ,constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
_a : Any = np.pad(
processed_features['attention_mask'] ,(difference, 0) )
_a : str = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
_a : Union[str, Any] = np.pad(
_a ,_a ,'constant' ,constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def __lowercase ( self : Dict ,_a : Union[Dict[str, np.ndarray], BatchFeature] ,_a : Optional[int] = None ,_a : Optional[int] = None ,_a : Optional[bool] = None ,):
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
_a : str = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_a : str = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_a : Dict = len(_a ) > max_length
if needs_to_be_truncated:
_a : str = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
_a : str = processed_features['attention_mask'][:max_length]
return processed_features
def __lowercase ( self : Optional[Any] ,_a : Any=False ,_a : Optional[Any]=None ):
'''simple docstring'''
if padding is not False:
if padding is True:
_a : int = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(_a ,_a ):
_a : Tuple = PaddingStrategy(_a )
elif isinstance(_a ,_a ):
_a : int = padding
else:
_a : int = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy
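# (Usage sketch for the padding mixin above, with a hypothetical subclass; the
# names are illustrative, not part of this file:
#     extractor = MyFeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#     batch = BatchFeature({"input_values": [np.zeros(3), np.zeros(5)]})
#     padded = extractor.pad(batch, padding="longest", return_tensors="np")
# Each sequence is padded with `padding_value` up to the longest length in the
# batch, and an `attention_mask` of ones and zeros marking real versus padded
# positions is added when requested.)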
| 5
|
'''simple docstring'''
import sys


def matrix_chain_order(array: list[int]):
    """simple docstring"""
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
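
# Worked example for the dimensions used in main() below, [30, 35, 15, 5, 10, 20, 25]
# (matrices A1..A6): the recurrence
#     matrix[a][b] = min over c of matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
# yields matrix[1][6] = 15125 scalar multiplications, and the optimal
# parenthesization printed is ((A1(A2A3))((A4A5)A6)).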

def print_optimal_solution(optimal_solution, i, j):
    """simple docstring"""
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    """simple docstring"""
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
| 5
| 1
|
"""simple docstring"""
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137


def lamberts_ellipsoidal_distance(
    lat1: float, lon1: float, lat2: float, lon2: float
) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))
    # Compute the central angle between the two points:
    # sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS
    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2
    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma / 2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)
    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma / 2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)
    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
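
# Usage sketch (the coordinates are illustrative):
#     lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521)
# returns the San Francisco to Yosemite distance in metres, roughly 254 km.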
if __name__ == "__main__":
import doctest
doctest.testmod()
| 335
|
from math import pi


def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
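
# Worked example: arc_length(90, 10) is a quarter of the circumference of a
# circle of radius 10, i.e. 2 * pi * 10 * (90 / 360) = 5 * pi, roughly 15.708.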
if __name__ == "__main__":
print(arc_length(90, 10))
| 159
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : Optional[int] = logging.get_logger(__name__)
_lowerCamelCase : str = {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
),
"""distilbert-base-uncased-finetuned-sst-2-english""": (
"""https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
),
}
class UpperCamelCase_ ( a__ ):
'''simple docstring'''
UpperCAmelCase__ = '''distilbert'''
UpperCAmelCase__ = {
'''hidden_size''': '''dim''',
'''num_attention_heads''': '''n_heads''',
'''num_hidden_layers''': '''n_layers''',
}
def __init__( self : str , UpperCAmelCase__ : List[Any]=30_522 , UpperCAmelCase__ : str=512 , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Any=6 , UpperCAmelCase__ : Union[str, Any]=12 , UpperCAmelCase__ : List[str]=768 , UpperCAmelCase__ : Dict=4 * 768 , UpperCAmelCase__ : List[Any]=0.1 , UpperCAmelCase__ : Any=0.1 , UpperCAmelCase__ : Tuple="gelu" , UpperCAmelCase__ : Any=0.02 , UpperCAmelCase__ : List[str]=0.1 , UpperCAmelCase__ : Union[str, Any]=0.2 , UpperCAmelCase__ : List[str]=0 , **UpperCAmelCase__ : Any , ) ->Dict:
'''simple docstring'''
A__ = vocab_size
A__ = max_position_embeddings
A__ = sinusoidal_pos_embds
A__ = n_layers
A__ = n_heads
A__ = dim
A__ = hidden_dim
A__ = dropout
A__ = attention_dropout
A__ = activation
A__ = initializer_range
A__ = qa_dropout
A__ = seq_classif_dropout
super().__init__(**UpperCAmelCase__ , pad_token_id=UpperCAmelCase__)
class UpperCamelCase_ ( a__ ):
'''simple docstring'''
@property
def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[Any]:
'''simple docstring'''
if self.task == "multiple-choice":
A__ = {0: "batch", 1: "choice", 2: "sequence"}
else:
A__ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
])
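# (The dynamic axes above tell the ONNX exporter which input dimensions may vary
# at runtime: batch size and sequence length always, plus the choice dimension
# for multiple-choice tasks.)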
| 370
|
from __future__ import annotations
import queue
class TreeNode:
    def __init__(self, data: int) -> None:
        self.data = data
        self.right: TreeNode | None = None
        self.left: TreeNode | None = None

def build_tree() -> TreeNode:
    """simple docstring"""
    print("\n********Press N to stop entering at any point of time********\n")
    check = input("Enter the value of the root node: ").strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise  # unreachable: the loop always returns once the user enters "n"

def pre_order(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=",")
    pre_order(node.left)
    pre_order(node.right)

def in_order(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left)
    print(node.data, end=",")
    in_order(node.right)

def post_order(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left)
    post_order(node.right)
    print(node.data, end=",")

def level_order(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=",")
        if node_dequeued.left:
            q.put(node_dequeued.left)
        if node_dequeued.right:
            q.put(node_dequeued.right)

def level_order_actual(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node)
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=",")
            if node_dequeued.left:
                list_.append(node_dequeued.left)
            if node_dequeued.right:
                list_.append(node_dequeued.right)
        print()
        for node in list_:
            q.put(node)

def pre_order_iter(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=",")
            stack.append(n)
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right

def in_order_iter(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n)
            n = n.left
        n = stack.pop()
        print(n.data, end=",")
        n = n.right

def post_order_iter(node: TreeNode) -> None:
    """simple docstring"""
    if not isinstance(node, TreeNode) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n)
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left)
        if n.right:
            stack1.append(n.right)
        stack2.append(n)
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data, end=",")
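
# Why the two stacks above work: stack1 emits nodes in root-right-left order,
# which is exactly reversed post-order (left-right-root); stack2 stores that
# sequence so popping it back out prints the nodes in true post-order.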
def SCREAMING_SNAKE_CASE ( lowercase_ = "" , lowercase_=50 , lowercase_="*" ) -> str:
"""simple docstring"""
if not s:
return "\n" + width * char
A__ , A__ = divmod(width - len(lowercase_ ) - 2 , 2 )
return f"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
_lowerCamelCase : TreeNode = build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 50 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 231
| 0
|
from collections import defaultdict
from math import gcd
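
# Euclid's formula: for coprime m > n > 0 with m - n odd, (m^2 - n^2, 2mn, m^2 + n^2)
# is a primitive Pythagorean triple with perimeter 2m(m + n); every right triangle
# with integral sides is an integer multiple of a primitive one, so counting all
# multiples of each primitive perimeter up to the limit covers every triangle.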

def solution(limit: int = 1_500_000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F'{solution() = }')
| 24
|
from __future__ import annotations

def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )
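
# Worked example: for nums = [3, 34, 4, 12, 5, 2] and max_sum = 9 below, a branch
# is pruned as soon as the running path sum exceeds 9 or the remaining numbers
# cannot reach 9; the surviving leaves are [3, 4, 2] and [4, 5].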

nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 24
| 1
|
"""simple docstring"""
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0's are free cells whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

class Node:
    def __init__(
        self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent

class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors
    def retrace_path(self, node: Node) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path

class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]) -> None:
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None
    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
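
# (Why the bidirectional variant pays off: with branching factor b and solution
# depth d, two frontiers that meet in the middle each explore on the order of
# b^(d/2) nodes, versus b^d for a single forward search.)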
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
a : Dict = (0, 0)
a : Any = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
a : Union[str, Any] = time.time()
a : Optional[int] = BreadthFirstSearch(init, goal)
a : Dict = bfs.search()
a : List[str] = time.time() - start_bfs_time
print('Unidirectional BFS computation time : ', bfs_time)
a : Any = time.time()
a : Optional[int] = BidirectionalBreadthFirstSearch(init, goal)
a : Any = bd_bfs.search()
a : Tuple = time.time() - start_bd_bfs_time
print('Bidirectional BFS computation time : ', bd_bfs_time)
| 369
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class _a :
def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=10, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=32 * 8, SCREAMING_SNAKE_CASE_=32 * 8, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=64, ) -> Union[str, Any]:
UpperCAmelCase_: int = parent
UpperCAmelCase_: Tuple = batch_size
UpperCAmelCase_: int = is_training
UpperCAmelCase_: Any = use_auxiliary_loss
UpperCAmelCase_: str = num_queries
UpperCAmelCase_: List[Any] = num_channels
UpperCAmelCase_: Union[str, Any] = min_size
UpperCAmelCase_: Optional[Any] = max_size
UpperCAmelCase_: Tuple = num_labels
UpperCAmelCase_: Union[str, Any] = hidden_dim
UpperCAmelCase_: int = hidden_dim
def __snake_case (self ) -> Tuple:
UpperCAmelCase_: List[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[str] = torch.ones([self.batch_size, self.min_size, self.max_size], device=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=SCREAMING_SNAKE_CASE_ ) > 0.5
).float()
UpperCAmelCase_: Optional[int] = (torch.rand((self.batch_size, self.num_labels), device=SCREAMING_SNAKE_CASE_ ) > 0.5).long()
UpperCAmelCase_: Union[str, Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def __snake_case (self ) -> Any:
UpperCAmelCase_: Any = MaskaFormerConfig(
hidden_size=self.hidden_dim, )
UpperCAmelCase_: Any = self.num_queries
UpperCAmelCase_: Dict = self.num_labels
UpperCAmelCase_: Dict = [1, 1, 1, 1]
UpperCAmelCase_: int = self.num_channels
UpperCAmelCase_: Union[str, Any] = 64
UpperCAmelCase_: List[Any] = 128
UpperCAmelCase_: Optional[Any] = self.hidden_dim
UpperCAmelCase_: str = self.hidden_dim
UpperCAmelCase_: List[str] = self.hidden_dim
return config
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Dict = self.prepare_config_and_inputs()
UpperCAmelCase_: Any = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
UpperCAmelCase_: Union[str, Any] = output.encoder_hidden_states
UpperCAmelCase_: int = output.pixel_decoder_hidden_states
UpperCAmelCase_: Any = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_ ), config.decoder_layers )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=False ) -> Optional[Any]:
with torch.no_grad():
UpperCAmelCase_: Dict = MaskaFormerModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCAmelCase_: List[Any] = model(pixel_values=SCREAMING_SNAKE_CASE_, pixel_mask=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: str = model(SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.hidden_dim), )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
UpperCAmelCase_: Tuple = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
def comm_check_on_output(SCREAMING_SNAKE_CASE_ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCAmelCase_: Dict = model(pixel_values=SCREAMING_SNAKE_CASE_, pixel_mask=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: int = model(SCREAMING_SNAKE_CASE_ )
comm_check_on_output(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = model(
pixel_values=SCREAMING_SNAKE_CASE_, pixel_mask=SCREAMING_SNAKE_CASE_, mask_labels=SCREAMING_SNAKE_CASE_, class_labels=SCREAMING_SNAKE_CASE_ )
comm_check_on_output(SCREAMING_SNAKE_CASE_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape, torch.Size([1] ) )
@require_torch
class _a ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
A = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
A = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
A = False
A = False
A = False
A = False
def __snake_case (self ) -> Any:
UpperCAmelCase_: List[str] = MaskaFormerModelTester(self )
UpperCAmelCase_: Any = ConfigTester(self, config_class=SCREAMING_SNAKE_CASE_, has_text_modality=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> List[Any]:
self.config_tester.run_common_tests()
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_ , UpperCAmelCase_: Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_: int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def __snake_case (self ) -> Dict:
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def __snake_case (self ) -> Optional[int]:
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def __snake_case (self ) -> List[str]:
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def __snake_case (self ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def __snake_case (self ) -> List[str]:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __snake_case (self ) -> Dict:
pass
def __snake_case (self ) -> Any:
UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_: Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_: Tuple = [*signature.parameters.keys()]
UpperCAmelCase_: str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )
@slow
def __snake_case (self ) -> List[Any]:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
UpperCAmelCase_: Any = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Union[str, Any]:
UpperCAmelCase_: str = (self.model_tester.min_size,) * 2
UpperCAmelCase_: str = {
"""pixel_values""": torch.randn((2, 3, *size), device=SCREAMING_SNAKE_CASE_ ),
"""mask_labels""": torch.randn((2, 10, *size), device=SCREAMING_SNAKE_CASE_ ),
"""class_labels""": torch.zeros(2, 10, device=SCREAMING_SNAKE_CASE_ ).long(),
}
UpperCAmelCase_: Dict = self.model_tester.get_config()
UpperCAmelCase_: Optional[Any] = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = model(**SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.loss is not None )
def __snake_case (self ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_: Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, output_hidden_states=SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> List[Any]:
UpperCAmelCase_ , UpperCAmelCase_: Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_: List[Any] = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Any = model(**SCREAMING_SNAKE_CASE_, output_attentions=SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.attentions is not None )
def __snake_case (self ) -> Optional[int]:
if not self.model_tester.is_training:
return
UpperCAmelCase_: Union[str, Any] = self.all_model_classes[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Any = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_: Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCAmelCase_: Optional[int] = model(SCREAMING_SNAKE_CASE_, mask_labels=SCREAMING_SNAKE_CASE_, class_labels=SCREAMING_SNAKE_CASE_ ).loss
loss.backward()
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: Any = self.all_model_classes[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: List[Any] = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_: Union[str, Any] = True
UpperCAmelCase_: str = True
UpperCAmelCase_: Optional[int] = model_class(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
model.train()
UpperCAmelCase_: Union[str, Any] = model(SCREAMING_SNAKE_CASE_, mask_labels=SCREAMING_SNAKE_CASE_, class_labels=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCAmelCase_: Union[str, Any] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCAmelCase_: Optional[int] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCAmelCase_: Optional[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
a : int = 1E-4
def lowerCAmelCase_ ():
"""simple docstring"""
UpperCAmelCase_: str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class _a ( unittest.TestCase ):
@cached_property
def __snake_case (self ) -> Optional[int]:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def __snake_case (self ) -> Dict:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def __snake_case (self ) -> List[str]:
UpperCAmelCase_: int = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = self.default_image_processor
UpperCAmelCase_: Optional[Any] = prepare_img()
UpperCAmelCase_: str = image_processor(SCREAMING_SNAKE_CASE_, return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE_, (1, 3, 384, 384) )
with torch.no_grad():
UpperCAmelCase_: Optional[int] = model(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: Dict = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: str = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ ).eval()
UpperCAmelCase_: Tuple = self.default_image_processor
UpperCAmelCase_: Dict = prepare_img()
UpperCAmelCase_: Any = image_processor(SCREAMING_SNAKE_CASE_, return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(SCREAMING_SNAKE_CASE_, (1, 3, 384, 384) )
with torch.no_grad():
UpperCAmelCase_: int = model(**SCREAMING_SNAKE_CASE_ )
# masks_queries_logits
UpperCAmelCase_: int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCAmelCase_: Optional[Any] = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
UpperCAmelCase_: int = torch.tensor(SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
# class_queries_logits
UpperCAmelCase_: Dict = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase_: Any = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], SCREAMING_SNAKE_CASE_, atol=SCREAMING_SNAKE_CASE_ ) )
def __snake_case (self ) -> Tuple:
UpperCAmelCase_: List[str] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(SCREAMING_SNAKE_CASE_ ).eval()
UpperCAmelCase_: Dict = self.default_image_processor
UpperCAmelCase_: str = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )], segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )], return_tensors="""pt""", )
UpperCAmelCase_: int = inputs["""pixel_values"""].to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[int] = [el.to(SCREAMING_SNAKE_CASE_ ) for el in inputs["""mask_labels"""]]
UpperCAmelCase_: int = [el.to(SCREAMING_SNAKE_CASE_ ) for el in inputs["""class_labels"""]]
with torch.no_grad():
UpperCAmelCase_: Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ )
self.assertTrue(outputs.loss is not None )
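# Illustrative follow-up (comment-only sketch, not part of the original test):
# after a forward pass, per-pixel maps can be recovered with the processor's
# post-processing API from the `transformers` library, e.g.:
#   seg_maps = image_processor.post_process_semantic_segmentation(
#       outputs, target_sizes=[(384, 384)] )
#   seg_maps[0].shape  # -> torch.Size([384, 384]) of per-pixel class ids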
| 82
| 0
|
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"nvidia/segformer-b0-finetuned-ade-512-512": (
"https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : List[Any] = '''segformer'''
def __init__( self , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=[2, 2, 2, 2] , lowerCAmelCase__=[8, 4, 2, 1] , lowerCAmelCase__=[3_2, 6_4, 1_6_0, 2_5_6] , lowerCAmelCase__=[7, 3, 3, 3] , lowerCAmelCase__=[4, 2, 2, 2] , lowerCAmelCase__=[1, 2, 5, 8] , lowerCAmelCase__=[4, 4, 4, 4] , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.02 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1E-6 , lowerCAmelCase__=2_5_6 , lowerCAmelCase__=2_5_5 , **lowerCAmelCase__ , ):
super().__init__(**lowerCAmelCase__)
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
"""Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"""
""" removed, as the behaviour will default to that of reshape_last_stage = True.""" , lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = num_encoder_blocks
__SCREAMING_SNAKE_CASE = depths
__SCREAMING_SNAKE_CASE = sr_ratios
__SCREAMING_SNAKE_CASE = hidden_sizes
__SCREAMING_SNAKE_CASE = patch_sizes
__SCREAMING_SNAKE_CASE = strides
__SCREAMING_SNAKE_CASE = mlp_ratios
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = classifier_dropout_prob
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = drop_path_rate
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = decoder_hidden_size
__SCREAMING_SNAKE_CASE = kwargs.get("""reshape_last_stage""" , lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
__lowercase : Optional[int] = version.parse('''1.11''' )
@property
def snake_case_ ( self):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
])
@property
def snake_case_ ( self):
return 1E-4
@property
def snake_case_ ( self):
return 1_2
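# Comment-only usage sketch (class names assumed from the public `transformers`
# API that this masked file mirrors):
#   config = SegformerConfig(num_labels=150)
#   onnx_config = SegformerOnnxConfig(config)
#   list(onnx_config.inputs)          # -> ["pixel_values"]
#   onnx_config.atol_for_validation   # -> 1e-4
#   onnx_config.default_onnx_opset    # -> 12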
| 100
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE_ ( metaclass=__a ):
"""simple docstring"""
__lowercase : Tuple = ['''keras_nlp''']
def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__):
requires_backends(self , ["""keras_nlp"""])
| 100
| 1
|
from heapq import heappop, heappush
import numpy as np
def _SCREAMING_SNAKE_CASE ( lowercase : np.ndarray , lowercase : tuple[int, int] , lowercase : tuple[int, int] , lowercase : bool , ):
'''simple docstring'''
lowerCamelCase_ , lowerCamelCase_ = grid.shape
lowerCamelCase_ = [-1, 1, 0, 0]
lowerCamelCase_ = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowerCamelCase_ , lowerCamelCase_ = [(0, source)], set()
lowerCamelCase_ = np.full((rows, cols) , np.inf )
lowerCamelCase_ = 0
lowerCamelCase_ = np.empty((rows, cols) , dtype=__a )
lowerCamelCase_ = None
while queue:
((lowerCamelCase_) , (lowerCamelCase_)) = heappop(__a )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowerCamelCase_ = []
while (x, y) != source:
path.append((x, y) )
lowerCamelCase_ , lowerCamelCase_ = predecessors[x, y]
path.append(__a ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(__a ) ):
lowerCamelCase_ , lowerCamelCase_ = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowerCamelCase_ = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(__a , (dist + 1, (nx, ny)) )
lowerCamelCase_ = dist + 1
lowerCamelCase_ = (x, y)
return np.inf, []
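# Worked example (comment-only; the body above is not runnable as-is because of
# the masked `__a` references):
#   grid = np.array([[1, 1, 1],
#                    [0, 0, 1],
#                    [1, 1, 1]])
#   dist, path = _SCREAMING_SNAKE_CASE(grid, (0, 0), (2, 0), False)
#   # dist == 6: the only 1-valued route goes around the blocked middle row.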
if __name__ == "__main__":
import doctest
doctest.testmod()
| 350
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = ['''image_processor''', '''tokenizer''']
UpperCamelCase = '''OwlViTImageProcessor'''
UpperCamelCase = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : Optional[Any] , A_ : Tuple=None , A_ : Tuple=None , **A_ : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , A_ , )
lowerCamelCase_ = kwargs.pop('feature_extractor' )
lowerCamelCase_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(A_ , A_ )
def __call__( self : List[str] , A_ : List[str]=None , A_ : List[Any]=None , A_ : Dict=None , A_ : Tuple="max_length" , A_ : int="np" , **A_ : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(A_ , A_ ) or (isinstance(A_ , A_ ) and not isinstance(text[0] , A_ )):
lowerCamelCase_ = [self.tokenizer(A_ , padding=A_ , return_tensors=A_ , **A_ )]
elif isinstance(A_ , A_ ) and isinstance(text[0] , A_ ):
lowerCamelCase_ = []
# Maximum number of queries across batch
lowerCamelCase_ = max([len(A_ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(A_ ) != max_num_queries:
lowerCamelCase_ = t + [' '] * (max_num_queries - len(A_ ))
lowerCamelCase_ = self.tokenizer(A_ , padding=A_ , return_tensors=A_ , **A_ )
encodings.append(A_ )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
lowerCamelCase_ = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
lowerCamelCase_ = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
lowerCamelCase_ = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
lowerCamelCase_ = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
lowerCamelCase_ = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
lowerCamelCase_ = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
lowerCamelCase_ = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
lowerCamelCase_ = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
lowerCamelCase_ = BatchEncoding()
lowerCamelCase_ = input_ids
lowerCamelCase_ = attention_mask
if query_images is not None:
lowerCamelCase_ = BatchEncoding()
lowerCamelCase_ = self.image_processor(
A_ , return_tensors=A_ , **A_ ).pixel_values
lowerCamelCase_ = query_pixel_values
if images is not None:
lowerCamelCase_ = self.image_processor(A_ , return_tensors=A_ , **A_ )
if text is not None and images is not None:
lowerCamelCase_ = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
lowerCamelCase_ = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**A_ ) , tensor_type=A_ )
def a__ ( self : Tuple , *A_ : Dict , **A_ : Dict ) -> Any:
"""simple docstring"""
return self.image_processor.post_process(*A_ , **A_ )
def a__ ( self : List[str] , *A_ : Any , **A_ : List[Any] ) -> Optional[int]:
"""simple docstring"""
return self.image_processor.post_process_object_detection(*A_ , **A_ )
def a__ ( self : Any , *A_ : str , **A_ : List[Any] ) -> Any:
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*A_ , **A_ )
def a__ ( self : Union[str, Any] , *A_ : Any , **A_ : Union[str, Any] ) -> str:
"""simple docstring"""
return self.tokenizer.batch_decode(*A_ , **A_ )
def a__ ( self : Optional[int] , *A_ : List[Any] , **A_ : int ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*A_ , **A_ )
@property
def a__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , A_ , )
return self.image_processor_class
@property
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , A_ , )
return self.image_processor
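# Comment-only usage sketch (this masked class mirrors `OwlViTProcessor` in
# `transformers`; the checkpoint name is an assumption):
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]],
#                      images=image, return_tensors="pt")
#   # -> dict with padded `input_ids`, `attention_mask` and `pixel_values`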
| 208
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
UpperCAmelCase__ = logging.get_logger(__name__)
class lowerCamelCase__ ( lowerCAmelCase):
def __init__(self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Optional[Any]:
_lowercase =feature_size
_lowercase =sampling_rate
_lowercase =padding_value
_lowercase =kwargs.pop('''padding_side''' , '''right''' )
_lowercase =kwargs.pop('''return_attention_mask''' , UpperCAmelCase )
super().__init__(**UpperCAmelCase )
def __A (self , UpperCAmelCase , UpperCAmelCase = True , UpperCAmelCase = None , UpperCAmelCase = False , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , ) -> BatchFeature:
# If we have a list of dicts, let's convert it to a dict of lists
# We do this to allow using this method as a collate_fn in a PyTorch DataLoader
if isinstance(UpperCAmelCase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
_lowercase ={
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'''
f" to this method that includes {self.model_input_names[0]}, but you provided"
f" {list(processed_features.keys() )}" )
_lowercase =processed_features[self.model_input_names[0]]
_lowercase =(
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(UpperCAmelCase ) == 0:
if return_attention_mask:
_lowercase =[]
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
_lowercase =required_input[0]
if isinstance(UpperCAmelCase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases, so we grab the first non-empty element.
_lowercase =0
while len(required_input[index] ) == 0:
index += 1
if index < len(UpperCAmelCase ):
_lowercase =required_input[index][0]
if return_tensors is None:
if is_tf_tensor(UpperCAmelCase ):
_lowercase ='''tf'''
elif is_torch_tensor(UpperCAmelCase ):
_lowercase ='''pt'''
elif isinstance(UpperCAmelCase , (int, float, list, tuple, np.ndarray) ):
_lowercase ='''np'''
else:
raise ValueError(
f"type of {first_element} unknown: {type(UpperCAmelCase )}. "
'''Should be one of a python, numpy, pytorch or tensorflow object.''' )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
_lowercase =to_numpy(UpperCAmelCase )
else:
_lowercase =[to_numpy(UpperCAmelCase ) for v in value]
# Convert padding_strategy in PaddingStrategy
_lowercase =self._get_padding_strategies(padding=UpperCAmelCase , max_length=UpperCAmelCase )
_lowercase =processed_features[self.model_input_names[0]]
_lowercase =len(UpperCAmelCase )
if not all(len(UpperCAmelCase ) == batch_size for v in processed_features.values() ):
raise ValueError('''Some items in the output dictionary have a different batch size than others.''' )
_lowercase =[]
for i in range(UpperCAmelCase ):
_lowercase ={k: v[i] for k, v in processed_features.items()}
# truncation
_lowercase =self._truncate(
UpperCAmelCase , max_length=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , truncation=UpperCAmelCase , )
truncated_inputs.append(UpperCAmelCase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
_lowercase =max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
_lowercase =PaddingStrategy.MAX_LENGTH
_lowercase ={}
for i in range(UpperCAmelCase ):
# padding
_lowercase =self._pad(
truncated_inputs[i] , max_length=UpperCAmelCase , padding_strategy=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
for key, value in outputs.items():
if key not in batch_outputs:
_lowercase =[]
if value.dtype is np.dtype(np.floataa ):
_lowercase =value.astype(np.floataa )
batch_outputs[key].append(UpperCAmelCase )
return BatchFeature(UpperCAmelCase , tensor_type=UpperCAmelCase )
def __A (self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase = None , UpperCAmelCase = None , ) -> dict:
_lowercase =processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
_lowercase =len(UpperCAmelCase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_lowercase =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_lowercase =padding_strategy != PaddingStrategy.DO_NOT_PAD and len(UpperCAmelCase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
_lowercase =np.ones(len(UpperCAmelCase ) , dtype=np.intaa )
if needs_to_be_padded:
_lowercase =max_length - len(UpperCAmelCase )
if self.padding_side == "right":
if return_attention_mask:
_lowercase =np.pad(
processed_features['''attention_mask'''] , (0, difference) )
_lowercase =((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
_lowercase =np.pad(
UpperCAmelCase , UpperCAmelCase , '''constant''' , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
_lowercase =np.pad(
processed_features['''attention_mask'''] , (difference, 0) )
_lowercase =((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
_lowercase =np.pad(
UpperCAmelCase , UpperCAmelCase , '''constant''' , constant_values=self.padding_value )
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return processed_features
def __A (self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , UpperCAmelCase = None , ) -> List[str]:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' )
_lowercase =processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
_lowercase =((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
_lowercase =len(UpperCAmelCase ) > max_length
if needs_to_be_truncated:
_lowercase =processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
_lowercase =processed_features['''attention_mask'''][:max_length]
return processed_features
def __A (self , UpperCAmelCase=False , UpperCAmelCase=None ) -> Dict:
# Get padding strategy
if padding is not False:
if padding is True:
_lowercase =PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(UpperCAmelCase , UpperCAmelCase ):
_lowercase =PaddingStrategy(UpperCAmelCase )
elif isinstance(UpperCAmelCase , UpperCAmelCase ):
_lowercase =padding
else:
_lowercase =PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined" )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'''
''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' )
return padding_strategy
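# Comment-only usage sketch of the padding path above (names mirror the public
# `SequenceFeatureExtractor.pad` API in `transformers`; values are illustrative):
#   fe = SequenceFeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#   batch = fe.pad({"input_values": [[0.1, 0.2, 0.3], [0.4]]},
#                  padding=True, return_tensors="np")
#   batch["input_values"].shape   # -> (2, 3); second row right-padded with 0.0
#   batch["attention_mask"]       # -> [[1, 1, 1], [1, 0, 0]] (if masks are returned)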
| 5
|
def UpperCAmelCase_ ( __snake_case ) -> str:
"""simple docstring"""
_lowercase =0
# if input_string is "aba" then new_input_string becomes "a|b|a"
_lowercase =''''''
_lowercase =''''''
# append each character followed by "|" to new_input_string for range(0, length-1)
for i in input_string[: len(__snake_case ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
# we will store the start and end of the previously found furthest-ending
# palindromic substring
_lowercase , _lowercase =0, 0
# length[i] stores the length of the palindromic substring centered at i
_lowercase =[1 for i in range(len(__snake_case ) )]
# for each character in new_input_string, find the corresponding palindromic substring
_lowercase =0
for j in range(len(__snake_case ) ):
_lowercase =1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(__snake_case )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
_lowercase =2 * k - 1
# does this palindrome end after the previously explored end (that is, r)?
# if yes, update r to the last index of this palindrome
if j + k - 1 > r:
_lowercase =j - k + 1 # noqa: E741
_lowercase =j + k - 1
# update max_length and start position
if max_length < length[j]:
_lowercase =length[j]
_lowercase =j
# create that string
_lowercase =new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
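# Worked example (comment-only; the function name is masked above):
#   UpperCAmelCase_("abacab")
#   # augmented form: "a|b|a|c|a|b"; the longest palindromic
#   # substring recovered after dropping "|" is "bacab".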
if __name__ == "__main__":
import doctest
doctest.testmod()
| 5
| 1
|
'''simple docstring'''
def lowercase_ ( lowerCAmelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase : Any = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def lowercase_ ( lowerCAmelCase__ : int ):
"""simple docstring"""
__UpperCAmelCase : Dict = 0
while number > 0:
__UpperCAmelCase : Any = number % 10
sum_of_digits += last_digit
__UpperCAmelCase : Any = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def lowercase_ ( lowerCAmelCase__ : int = 100 ):
"""simple docstring"""
__UpperCAmelCase : Any = factorial(lowerCAmelCase__ )
__UpperCAmelCase : Optional[int] = split_and_add(lowerCAmelCase__ )
return result
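# Worked example (illustration): factorial(10) == 3_628_800, and
# 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27, so solution(10) returns 27.
# For the classic num = 100 case, the digit sum of 100! is 648.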
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 16
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class _A :
_SCREAMING_SNAKE_CASE : List[str]
_SCREAMING_SNAKE_CASE : Optional[str] = None
# Automatically constructed
_SCREAMING_SNAKE_CASE : ClassVar[str] = "dict"
_SCREAMING_SNAKE_CASE : ClassVar[Any] = None
_SCREAMING_SNAKE_CASE : str = field(default="Translation" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE )
def __call__( self ) -> Any:
'''simple docstring'''
return pa.struct({lang: pa.string() for lang in sorted(self.languages )} )
def __A ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Value
return {k: Value("""string""" ) for k in sorted(self.languages )}
@dataclass
class _A :
_SCREAMING_SNAKE_CASE : Optional[List] = None
_SCREAMING_SNAKE_CASE : Optional[int] = None
_SCREAMING_SNAKE_CASE : Optional[str] = None
# Automatically constructed
_SCREAMING_SNAKE_CASE : ClassVar[str] = "dict"
_SCREAMING_SNAKE_CASE : ClassVar[Any] = None
_SCREAMING_SNAKE_CASE : str = field(default="TranslationVariableLanguages" , init=__SCREAMING_SNAKE_CASE , repr=__SCREAMING_SNAKE_CASE )
def __A ( self ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Dict = sorted(set(self.languages ) ) if self.languages else None
__UpperCAmelCase : int = len(self.languages ) if self.languages else None
def __call__( self ) -> Optional[Any]:
'''simple docstring'''
return pa.struct({"""language""": pa.list_(pa.string() ), """translation""": pa.list_(pa.string() )} )
def __A ( self , __UpperCAmelCase ) -> Any:
'''simple docstring'''
__UpperCAmelCase : List[Any] = set(self.languages )
if self.languages and set(__UpperCAmelCase ) - lang_set:
raise ValueError(
f'Some languages in example ({", ".join(sorted(set(__UpperCAmelCase ) - lang_set ) )}) are not in valid set ({", ".join(__UpperCAmelCase )}).' )
# Convert dictionary into tuples, splitting out cases where there are
# multiple translations for a single language.
__UpperCAmelCase : Dict = []
for lang, text in translation_dict.items():
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
translation_tuples.append((lang, text) )
else:
translation_tuples.extend([(lang, el) for el in text] )
# Ensure translations are in ascending order by language code.
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = zip(*sorted(__UpperCAmelCase ) )
return {"language": languages, "translation": translations}
def __A ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
'''simple docstring'''
from .features import Sequence, Value
return {
"language": Sequence(Value("""string""" ) ),
"translation": Sequence(Value("""string""" ) ),
}
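# Worked example (comment-only) of the flattening performed above:
#   {"en": "the cat", "fr": ["le chat", "la chatte"]}
# becomes, after sorting the (language, text) pairs,
#   {"language": ("en", "fr", "fr"),
#    "translation": ("the cat", "la chatte", "le chat")}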
| 16
| 1
|
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
lowercase_ = logging.getLogger(__name__)
lowercase_ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
lowercase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class a_ :
'''simple docstring'''
UpperCamelCase = field(
default=snake_case_ , metadata={
'''help''': (
'''The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'''
)
} , )
UpperCamelCase = field(
default=snake_case_ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(snake_case_ )} , )
UpperCamelCase = field(
default=snake_case_ , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
UpperCamelCase = field(
default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=snake_case_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase = field(
default=snake_case_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase = field(
default=snake_case_ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase = field(
default=snake_case_ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def snake_case_( self ) -> str:
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"""--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class a_ :
'''simple docstring'''
UpperCamelCase = field(
default=snake_case_ , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(
default=snake_case_ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
UpperCamelCase = field(default=snake_case_ , metadata={'''help''': '''The input training data file (a text file).'''} )
UpperCamelCase = field(
default=snake_case_ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
UpperCamelCase = field(
default=snake_case_ , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=snake_case_ , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
UpperCamelCase = field(
default=snake_case_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
UpperCamelCase = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
UpperCamelCase = field(
default=snake_case_ , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
UpperCamelCase = field(
default=snake_case_ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
UpperCamelCase = field(
default=0.15 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
UpperCamelCase = field(
default=snake_case_ , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def snake_case_( self ) -> Optional[int]:
if self.train_file is not None:
_SCREAMING_SNAKE_CASE = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
_SCREAMING_SNAKE_CASE = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def lowerCamelCase ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] ) ->Tuple:
with open(__lowerCamelCase , """r""" , encoding="""utf-8""" ) as f:
_SCREAMING_SNAKE_CASE = [json.loads(__lowerCamelCase ) for line in f.read().splitlines() if (len(__lowerCamelCase ) > 0 and not line.isspace())]
assert len(__lowerCamelCase ) == len(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = {c: dataset[c] for c in dataset.column_names}
_SCREAMING_SNAKE_CASE = refs
return Dataset.from_dict(__lowerCamelCase )
def lowerCamelCase ( ) ->Union[str, Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , __lowerCamelCase )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_SCREAMING_SNAKE_CASE = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
_SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'train[:{data_args.validation_split_percentage}%]' , )
_SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'train[{data_args.validation_split_percentage}%:]' , )
else:
_SCREAMING_SNAKE_CASE = {}
if data_args.train_file is not None:
_SCREAMING_SNAKE_CASE = data_args.train_file
if data_args.validation_file is not None:
_SCREAMING_SNAKE_CASE = data_args.validation_file
_SCREAMING_SNAKE_CASE = data_args.train_file.split(""".""" )[-1]
if extension == "txt":
_SCREAMING_SNAKE_CASE = """text"""
_SCREAMING_SNAKE_CASE = load_dataset(__lowerCamelCase , data_files=__lowerCamelCase )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_SCREAMING_SNAKE_CASE = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
_SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.config_name , **__lowerCamelCase )
elif model_args.model_name_or_path:
_SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(model_args.model_name_or_path , **__lowerCamelCase )
else:
_SCREAMING_SNAKE_CASE = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F'Overriding config: {model_args.config_overrides}' )
config.update_from_string(model_args.config_overrides )
logger.info(F'New config: {config}' )
_SCREAMING_SNAKE_CASE = {
"""cache_dir""": model_args.cache_dir,
"""use_fast""": model_args.use_fast_tokenizer,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **__lowerCamelCase )
elif model_args.model_name_or_path:
_SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **__lowerCamelCase )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
if model_args.model_name_or_path:
_SCREAMING_SNAKE_CASE = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__lowerCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
_SCREAMING_SNAKE_CASE = AutoModelForMaskedLM.from_config(__lowerCamelCase )
model.resize_token_embeddings(len(__lowerCamelCase ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
_SCREAMING_SNAKE_CASE = datasets["""train"""].column_names
else:
_SCREAMING_SNAKE_CASE = datasets["""validation"""].column_names
_SCREAMING_SNAKE_CASE = """text""" if """text""" in column_names else column_names[0]
_SCREAMING_SNAKE_CASE = """max_length""" if data_args.pad_to_max_length else False
def tokenize_function(__lowerCamelCase : List[str] ):
# Remove empty lines
_SCREAMING_SNAKE_CASE = [line for line in examples["""text"""] if len(__lowerCamelCase ) > 0 and not line.isspace()]
return tokenizer(examples["""text"""] , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=data_args.max_seq_length )
_SCREAMING_SNAKE_CASE = datasets.map(
__lowerCamelCase , batched=__lowerCamelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the Chinese references if provided
if data_args.train_ref_file is not None:
_SCREAMING_SNAKE_CASE = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
_SCREAMING_SNAKE_CASE = add_chinese_references(
tokenized_datasets["""validation"""] , data_args.validation_ref_file )
# If we have ref files, we need to keep the trainer from removing them
_SCREAMING_SNAKE_CASE = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
_SCREAMING_SNAKE_CASE = False
# Data collator
# This one will take care of randomly masking the tokens.
_SCREAMING_SNAKE_CASE = DataCollatorForWholeWordMask(tokenizer=__lowerCamelCase , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_SCREAMING_SNAKE_CASE = Trainer(
model=__lowerCamelCase , args=__lowerCamelCase , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=__lowerCamelCase , data_collator=__lowerCamelCase , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_SCREAMING_SNAKE_CASE = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
_SCREAMING_SNAKE_CASE = model_args.model_name_or_path
else:
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=__lowerCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
_SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , """train_results.txt""" )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , """w""" ) as writer:
logger.info("""***** Train results *****""" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# Evaluation
_SCREAMING_SNAKE_CASE = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_SCREAMING_SNAKE_CASE = trainer.evaluate()
_SCREAMING_SNAKE_CASE = math.exp(eval_output["""eval_loss"""] )
_SCREAMING_SNAKE_CASE = perplexity
_SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" )
if trainer.is_world_process_zero():
with open(__lowerCamelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in sorted(results.items() ):
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
return results
def lowerCamelCase ( __lowerCamelCase : List[str] ) ->Union[str, Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
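# Illustrative invocation (comment-only; the script name is an assumption, the
# flags come from the dataclasses above):
#   python run_mlm_wwm.py \
#     --model_name_or_path bert-base-chinese \
#     --train_file train.txt --train_ref_file train_ref.txt \
#     --do_train --output_dir ./out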
| 58
|
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowerCAmelCase ( __a , unittest.TestCase ):
_lowercase =None
_lowercase =BloomTokenizerFast
_lowercase =BloomTokenizerFast
_lowercase =True
_lowercase =False
_lowercase ='''tokenizer_file'''
_lowercase ={'''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''unk_token''': '''<unk>''', '''pad_token''': '''<pad>'''}
def __a ( self ) -> Dict:
super().setUp()
lowerCAmelCase_ = BloomTokenizerFast.from_pretrained("bigscience/tokenizer" )
tokenizer.save_pretrained(self.tmpdirname )
def __a ( self , **_UpperCamelCase ) -> Tuple:
kwargs.update(self.special_tokens_map )
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def __a ( self ) -> List[str]:
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = ["The quick brown fox</s>", "jumps over the lazy dog</s>"]
lowerCAmelCase_ = [[2_175, 23_714, 73_173, 144_252, 2], [77, 132_619, 3_478, 368, 109_586, 35_433, 2]]
lowerCAmelCase_ = tokenizer.batch_encode_plus(_UpperCamelCase )["input_ids"]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase_ = tokenizer.batch_decode(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __a ( self , _UpperCamelCase=6 ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
lowerCAmelCase_ = "This is a simple input"
lowerCAmelCase_ = ["This is a simple input 1", "This is a simple input 2"]
lowerCAmelCase_ = ("This is a simple input", "This is a pair")
lowerCAmelCase_ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
try:
tokenizer_r.encode(_UpperCamelCase , max_length=_UpperCamelCase )
tokenizer_r.encode_plus(_UpperCamelCase , max_length=_UpperCamelCase )
tokenizer_r.batch_encode_plus(_UpperCamelCase , max_length=_UpperCamelCase )
tokenizer_r.encode(_UpperCamelCase , max_length=_UpperCamelCase )
tokenizer_r.batch_encode_plus(_UpperCamelCase , max_length=_UpperCamelCase )
except ValueError:
self.fail("Bloom Tokenizer should be able to deal with padding" )
lowerCAmelCase_ = None # Hotfixing padding = None
self.assertRaises(_UpperCamelCase , tokenizer_r.encode , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Simple input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Simple input
self.assertRaises(
_UpperCamelCase , tokenizer_r.batch_encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" , )
# Pair input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Pair input
self.assertRaises(_UpperCamelCase , tokenizer_r.encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" )
# Pair input
self.assertRaises(
_UpperCamelCase , tokenizer_r.batch_encode_plus , _UpperCamelCase , max_length=_UpperCamelCase , padding="max_length" , )
def __a ( self ) -> Any:
lowerCAmelCase_ = self.get_rust_tokenizer()
lowerCAmelCase_ = load_dataset("xnli" , "all_languages" , split="test" , streaming=_UpperCamelCase )
lowerCAmelCase_ = next(iter(_UpperCamelCase ) )["premise"] # pick one sample
lowerCAmelCase_ = list(sample_data.values() )
lowerCAmelCase_ = list(map(tokenizer.encode , _UpperCamelCase ) )
lowerCAmelCase_ = [tokenizer.decode(_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase ) for x in output_tokens]
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def __a ( self ) -> List[Any]:
# The test has to be overridden because BLOOM uses ALiBi positional embeddings, which do not
# impose any sequence-length constraints. The parent class's test would fail since it relies
# on the maximum sequence length of the positional embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map ) , 1 )
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values() )[0] ) , 1 )
| 231
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_UpperCamelCase = {"""configuration_glpn""": ["""GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GLPNConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = ["""GLPNFeatureExtractor"""]
_UpperCamelCase = ["""GLPNImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
"""GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GLPNForDepthEstimation""",
"""GLPNLayer""",
"""GLPNModel""",
"""GLPNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 234
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_UpperCamelCase = logging.get_logger(__name__)
def _a ( _snake_case ):
"""simple docstring"""
if isinstance(_snake_case , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(_snake_case , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(_snake_case ):
return [[videos]]
raise ValueError(F'''Could not make batched video from {videos}''' )
class lowerCamelCase__ ( snake_case ):
SCREAMING_SNAKE_CASE = ['''pixel_values''']
def __init__( self ,A = True ,A = None ,A = PILImageResampling.BILINEAR ,A = True ,A = None ,A = True ,A = 1 / 255 ,A = True ,A = True ,A = None ,A = None ,**A ,):
super().__init__(**A )
UpperCAmelCase = size if size is not None else {"""shortest_edge""": 256}
UpperCAmelCase = get_size_dict(A ,default_to_square=A )
UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
UpperCAmelCase = get_size_dict(A ,param_name="""crop_size""" )
UpperCAmelCase = do_resize
UpperCAmelCase = size
UpperCAmelCase = do_center_crop
UpperCAmelCase = crop_size
UpperCAmelCase = resample
UpperCAmelCase = do_rescale
UpperCAmelCase = rescale_factor
UpperCAmelCase = offset
UpperCAmelCase = do_normalize
UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _UpperCamelCase ( self ,A ,A ,A = PILImageResampling.BILINEAR ,A = None ,**A ,):
UpperCAmelCase = get_size_dict(A ,default_to_square=A )
if "shortest_edge" in size:
UpperCAmelCase = get_resize_output_image_size(A ,size["""shortest_edge"""] ,default_to_square=A )
elif "height" in size and "width" in size:
UpperCAmelCase = (size["""height"""], size["""width"""])
else:
raise ValueError(F'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(A ,size=A ,resample=A ,data_format=A ,**A )
def _UpperCamelCase ( self ,A ,A ,A = None ,**A ,):
UpperCAmelCase = get_size_dict(A )
if "height" not in size or "width" not in size:
raise ValueError(F'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(A ,size=(size["""height"""], size["""width"""]) ,data_format=A ,**A )
def _UpperCamelCase ( self ,A ,A ,A = True ,A = None ,**A ,):
UpperCAmelCase = image.astype(np.floataa )
if offset:
UpperCAmelCase = image - (scale / 2)
return rescale(A ,scale=A ,data_format=A ,**A )
def _UpperCamelCase ( self ,A ,A ,A ,A = None ,**A ,):
return normalize(A ,mean=A ,std=A ,data_format=A ,**A )
def _UpperCamelCase ( self ,A ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = ChannelDimension.FIRST ,):
if do_resize and (size is None or resample is None):  # parenthesized so the check only fires when resizing
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
if offset and not do_rescale:
raise ValueError("""For offset, do_rescale must also be set to True.""" )
# All transformations expect numpy arrays.
UpperCAmelCase = to_numpy_array(A )
if do_resize:
UpperCAmelCase = self.resize(image=A ,size=A ,resample=A )
if do_center_crop:
UpperCAmelCase = self.center_crop(A ,size=A )
if do_rescale:
UpperCAmelCase = self.rescale(image=A ,scale=A ,offset=A )
if do_normalize:
UpperCAmelCase = self.normalize(image=A ,mean=A ,std=A )
UpperCAmelCase = to_channel_dimension_format(A ,A )
return image
def _UpperCamelCase ( self ,A ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = None ,A = ChannelDimension.FIRST ,**A ,):
UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase = resample if resample is not None else self.resample
UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase = offset if offset is not None else self.offset
UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase = image_std if image_std is not None else self.image_std
UpperCAmelCase = size if size is not None else self.size
UpperCAmelCase = get_size_dict(A ,default_to_square=A )
UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase = get_size_dict(A ,param_name="""crop_size""" )
if not valid_images(A ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
UpperCAmelCase = make_batched(A )
UpperCAmelCase = [
[
self._preprocess_image(
image=A ,do_resize=A ,size=A ,resample=A ,do_center_crop=A ,crop_size=A ,do_rescale=A ,rescale_factor=A ,offset=A ,do_normalize=A ,image_mean=A ,image_std=A ,data_format=A ,)
for img in video
]
for video in videos
]
UpperCAmelCase = {"""pixel_values""": videos}
return BatchFeature(data=A ,tensor_type=A )
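def _video_mapping_sketch() -> None:
    # Illustrative sketch (not part of the processor above) of the nested
    # mapping in preprocess(): each video is a list of frames, and the same
    # per-frame transform is applied to every frame, preserving the
    # (num_videos, num_frames) nesting of the output.
    videos = [["f0", "f1"], ["f0"]]
    processed = [[frame.upper() for frame in video] for video in videos]
    assert processed == [["F0", "F1"], ["F0"]]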
| 234
| 1
|
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
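def _perplexity_sketch() -> None:
    # Minimal sketch (not part of the original script) of the loss ->
    # perplexity relation used in evaluate() above: perplexity is
    # exp(mean cross-entropy loss), and the try/except guards against exp
    # overflowing for very large losses.
    losses = torch.tensor([2.0, 2.2, 1.8])
    perplexity = torch.exp(losses.mean())  # exp(2.0) ~= 7.389
    assert abs(perplexity.item() - 7.389) < 1e-2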
| 239
|
from collections.abc import Iterable
from typing import Generic, TypeVar
A__ = TypeVar("""_T""")
class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable=None):
        """simple docstring"""
        self._stack1 = list(iterable or [])
        self._stack2 = []

    def __len__(self):
        """simple docstring"""
        return len(self._stack1) + len(self._stack2)

    def __repr__(self):
        """simple docstring"""
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item):
        """simple docstring"""
        self._stack1.append(item)

    def get(self):
        """simple docstring"""
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
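def _queue_sketch() -> None:
    # Illustrative usage of the queue above: put() pushes onto the first
    # stack and get() lazily drains it into the second, so elements come back
    # FIFO with amortized O(1) cost per operation.
    queue = QueueByTwoStacks([1, 2, 3])
    queue.put(4)
    assert [queue.get() for _ in range(4)] == [1, 2, 3, 4]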
| 82
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<|endoftext|>", bos_token="<|endoftext|>", eos_token="<|endoftext|>", add_prefix_space=False, **kwargs):
        '''simple docstring'''
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        '''simple docstring'''
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
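def _left_truncation_sketch() -> None:
    # Illustrative sketch (hypothetical values) of the truncation in
    # _build_conversation_input_ids above: when the accumulated conversation
    # exceeds model_max_length, only the most recent tokens are kept.
    model_max_length = 5
    input_ids = [1, 2, 3, 4, 5, 6, 7]
    if len(input_ids) > model_max_length:
        input_ids = input_ids[-model_max_length:]
    assert input_ids == [3, 4, 5, 6, 7]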
| 297
|
import sys
def matrix_chain_order(array):
    '''simple docstring'''
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    '''simple docstring'''
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    '''simple docstring'''
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
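def _matrix_chain_sketch() -> None:
    # Hand-checkable example for matrix_chain_order above: with dimensions
    # [10, 20, 30] the only product is A1 (10x20) @ A2 (20x30), which costs
    # 10 * 20 * 30 = 6000 scalar multiplications.
    matrix, sol = matrix_chain_order([10, 20, 30])
    assert matrix[1][2] == 6000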
| 297
| 1
|
"""simple docstring"""
def nor_gate(input_1: int, input_2: int) -> int:
    '''simple docstring'''
    return int(input_1 == input_2 == 0)


def main() -> None:
    '''simple docstring'''
    print("Truth Table of NOR Gate:")
    print("| Input 1 | Input 2 | Output |")
    print(f"| 0 | 0 | {nor_gate(0, 0)} |")
    print(f"| 0 | 1 | {nor_gate(0, 1)} |")
    print(f"| 1 | 0 | {nor_gate(1, 0)} |")
    print(f"| 1 | 1 | {nor_gate(1, 1)} |")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
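# Aside (illustrative): NOR is functionally complete. For example, NOT falls
# out of the gate above by wiring the same signal to both inputs:
def not_gate(input_1: int) -> int:
    return nor_gate(input_1, input_1)  # not_gate(0) == 1, not_gate(1) == 0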
| 17
|
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
_UpperCamelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path, pytorch_dump_folder_path):
if "xprophetnet" in prophetnet_checkpoint_path:
__lowerCamelCase : Union[str, Any] = XLMProphetNetForConditionalGenerationOld.from_pretrained(_lowerCAmelCase )
__lowerCamelCase ,__lowerCamelCase : List[str] = XLMProphetNetForConditionalGeneration.from_pretrained(
_lowerCAmelCase ,output_loading_info=_lowerCAmelCase )
else:
__lowerCamelCase : Optional[int] = ProphetNetForConditionalGenerationOld.from_pretrained(_lowerCAmelCase )
__lowerCamelCase ,__lowerCamelCase : List[str] = ProphetNetForConditionalGeneration.from_pretrained(
_lowerCAmelCase ,output_loading_info=_lowerCAmelCase )
__lowerCamelCase : Union[str, Any] = ['key_proj', 'value_proj', 'query_proj']
__lowerCamelCase : Optional[Any] = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
for key in loading_info["missing_keys"]:
__lowerCamelCase : Optional[int] = key.split('.' )
if attributes[0] == "lm_head":
__lowerCamelCase : Dict = prophet
__lowerCamelCase : List[Any] = prophet_old
else:
__lowerCamelCase : Any = prophet.prophetnet
__lowerCamelCase : Any = prophet_old.model
__lowerCamelCase : Optional[Any] = False
for attribute in attributes:
if attribute in mapping:
__lowerCamelCase : Any = mapping[attribute]
if not hasattr(_lowerCAmelCase ,_lowerCAmelCase ) and len(_lowerCAmelCase ) > 0:
__lowerCamelCase : int = attribute
elif hasattr(_lowerCAmelCase ,_lowerCAmelCase ):
__lowerCamelCase : Optional[int] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
__lowerCamelCase : List[Any] = old_model.weight
logger.info(F'{attribute} is initialized.' )
__lowerCamelCase : List[Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
__lowerCamelCase : List[Any] = old_model.bias
logger.info(F'{attribute} is initialized' )
__lowerCamelCase : Dict = True
break
elif attribute in special_keys and hasattr(_lowerCAmelCase ,'in_proj_weight' ):
__lowerCamelCase : Optional[Any] = old_model.in_proj_weight.shape[0] // 3
__lowerCamelCase : Optional[Any] = getattr(_lowerCAmelCase ,_lowerCAmelCase )
                    assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                    assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
__lowerCamelCase : Optional[int] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
__lowerCamelCase : Dict = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
__lowerCamelCase : List[str] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
__lowerCamelCase : Dict = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
__lowerCamelCase : str = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
__lowerCamelCase : Optional[int] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
__lowerCamelCase : Optional[int] = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
__lowerCamelCase : Optional[int] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
__lowerCamelCase : Dict = True
break
if attribute.isdigit():
__lowerCamelCase : List[str] = model[int(_lowerCAmelCase )]
__lowerCamelCase : Union[str, Any] = old_model[int(_lowerCAmelCase )]
else:
__lowerCamelCase : Union[str, Any] = getattr(_lowerCAmelCase ,_lowerCAmelCase )
if old_attribute == "":
__lowerCamelCase : str = old_model
else:
if not hasattr(_lowerCAmelCase ,_lowerCAmelCase ):
raise ValueError(F'{old_model} does not have {old_attribute}' )
__lowerCamelCase : str = getattr(_lowerCAmelCase ,_lowerCAmelCase )
if not is_key_init:
raise ValueError(F'{key} was not correctly initialized!' )
print(F'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(_lowerCAmelCase )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_UpperCamelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
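def _in_proj_split_sketch() -> None:
    # Standalone sketch (hypothetical sizes) of the fused-projection split
    # performed above: a stacked (3 * d, d) in_proj weight is sliced into
    # query/key/value blocks of d rows each, matching the query_proj /
    # key_proj / value_proj branches.
    import torch

    d = 4
    in_proj_weight = torch.randn(3 * d, d)
    q_w = in_proj_weight[:d, :]
    k_w = in_proj_weight[d : 2 * d, :]
    v_w = in_proj_weight[2 * d :, :]
    assert q_w.shape == k_w.shape == v_w.shape == (d, d)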
| 208
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase__ ( UpperCAmelCase_ , unittest.TestCase):
__SCREAMING_SNAKE_CASE = ConsistencyModelPipeline
__SCREAMING_SNAKE_CASE = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
__SCREAMING_SNAKE_CASE = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
__SCREAMING_SNAKE_CASE = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
])
@property
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = UNetaDModel.from_pretrained(
"""diffusers/consistency-models-test""" , subfolder="""test_unet""" , )
return unet
@property
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = UNetaDModel.from_pretrained(
"""diffusers/consistency-models-test""" , subfolder="""test_unet_class_cond""" , )
return unet
def __lowerCamelCase ( self , lowercase=False ) -> Union[str, Any]:
if class_cond:
__UpperCamelCase = self.dummy_cond_unet
else:
__UpperCamelCase = self.dummy_uncond_unet
# Default to CM multistep sampler
__UpperCamelCase = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
}
return components
def __lowerCamelCase ( self , lowercase , lowercase=0 ) -> int:
if str(lowercase ).startswith("""mps""" ):
__UpperCamelCase = torch.manual_seed(lowercase )
else:
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(lowercase )
__UpperCamelCase = {
"""batch_size""": 1,
"""num_inference_steps""": None,
"""timesteps""": [2_2, 0],
"""generator""": generator,
"""output_type""": """np""",
}
return inputs
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = ConsistencyModelPipeline(**lowercase )
__UpperCamelCase = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = self.get_dummy_inputs(lowercase )
__UpperCamelCase = pipe(**lowercase ).images
assert image.shape == (1, 3_2, 3_2, 3)
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.get_dummy_components(class_cond=lowercase )
__UpperCamelCase = ConsistencyModelPipeline(**lowercase )
__UpperCamelCase = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = self.get_dummy_inputs(lowercase )
__UpperCamelCase = 0
__UpperCamelCase = pipe(**lowercase ).images
assert image.shape == (1, 3_2, 3_2, 3)
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.3_572, 0.6_273, 0.4_031, 0.3_961, 0.4_321, 0.5_730, 0.5_266, 0.4_780, 0.5_004] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = ConsistencyModelPipeline(**lowercase )
__UpperCamelCase = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = self.get_dummy_inputs(lowercase )
__UpperCamelCase = 1
__UpperCamelCase = None
__UpperCamelCase = pipe(**lowercase ).images
assert image.shape == (1, 3_2, 3_2, 3)
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = self.get_dummy_components(class_cond=lowercase )
__UpperCamelCase = ConsistencyModelPipeline(**lowercase )
__UpperCamelCase = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = self.get_dummy_inputs(lowercase )
__UpperCamelCase = 1
__UpperCamelCase = None
__UpperCamelCase = 0
__UpperCamelCase = pipe(**lowercase ).images
assert image.shape == (1, 3_2, 3_2, 3)
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.5_004, 0.5_004, 0.4_994, 0.5_008, 0.4_976, 0.5_018, 0.4_990, 0.4_982, 0.4_987] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase):
def __lowerCamelCase ( self ) -> List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self , lowercase=0 , lowercase=False , lowercase="cpu" , lowercase=torch.floataa , lowercase=(1, 3, 6_4, 6_4) ) -> Optional[int]:
__UpperCamelCase = torch.manual_seed(lowercase )
__UpperCamelCase = {
"""num_inference_steps""": None,
"""timesteps""": [2_2, 0],
"""class_labels""": 0,
"""generator""": generator,
"""output_type""": """np""",
}
if get_fixed_latents:
__UpperCamelCase = self.get_fixed_latents(seed=lowercase , device=lowercase , dtype=lowercase , shape=lowercase )
__UpperCamelCase = latents
return inputs
def __lowerCamelCase ( self , lowercase=0 , lowercase="cpu" , lowercase=torch.floataa , lowercase=(1, 3, 6_4, 6_4) ) -> Tuple:
if type(lowercase ) == str:
__UpperCamelCase = torch.device(lowercase )
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(lowercase )
__UpperCamelCase = randn_tensor(lowercase , generator=lowercase , device=lowercase , dtype=lowercase )
return latents
def __lowerCamelCase ( self ) -> List[str]:
__UpperCamelCase = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
__UpperCamelCase = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCamelCase = ConsistencyModelPipeline(unet=lowercase , scheduler=lowercase )
pipe.to(torch_device=lowercase )
pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = self.get_inputs()
__UpperCamelCase = pipe(**lowercase ).images
assert image.shape == (1, 6_4, 6_4, 3)
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.0_888, 0.0_881, 0.0_666, 0.0_479, 0.0_292, 0.0_195, 0.0_201, 0.0_163, 0.0_254] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def __lowerCamelCase ( self ) -> Optional[Any]:
__UpperCamelCase = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
__UpperCamelCase = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCamelCase = ConsistencyModelPipeline(unet=lowercase , scheduler=lowercase )
pipe.to(torch_device=lowercase )
pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = self.get_inputs()
__UpperCamelCase = 1
__UpperCamelCase = None
__UpperCamelCase = pipe(**lowercase ).images
assert image.shape == (1, 6_4, 6_4, 3)
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.0_340, 0.0_152, 0.0_063, 0.0_267, 0.0_221, 0.0_107, 0.0_416, 0.0_186, 0.0_217] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
@require_torch_a
def __lowerCamelCase ( self ) -> Tuple:
__UpperCamelCase = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
__UpperCamelCase = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCamelCase = ConsistencyModelPipeline(unet=lowercase , scheduler=lowercase )
pipe.to(torch_device=lowercase , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = self.get_inputs(get_fixed_latents=lowercase , device=lowercase )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowercase , enable_math=lowercase , enable_mem_efficient=lowercase ):
__UpperCamelCase = pipe(**lowercase ).images
assert image.shape == (1, 6_4, 6_4, 3)
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.1_875, 0.1_428, 0.1_289, 0.2_151, 0.2_092, 0.1_477, 0.1_877, 0.1_641, 0.1_353] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@require_torch_a
def __lowerCamelCase ( self ) -> int:
__UpperCamelCase = UNetaDModel.from_pretrained("""diffusers/consistency_models""" , subfolder="""diffusers_cd_imagenet64_l2""" )
__UpperCamelCase = CMStochasticIterativeScheduler(
num_train_timesteps=4_0 , sigma_min=0.002 , sigma_max=80.0 , )
__UpperCamelCase = ConsistencyModelPipeline(unet=lowercase , scheduler=lowercase )
pipe.to(torch_device=lowercase , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = self.get_inputs(get_fixed_latents=lowercase , device=lowercase )
__UpperCamelCase = 1
__UpperCamelCase = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowercase , enable_math=lowercase , enable_mem_efficient=lowercase ):
__UpperCamelCase = pipe(**lowercase ).images
assert image.shape == (1, 6_4, 6_4, 3)
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = np.array([0.1_663, 0.1_948, 0.2_275, 0.1_680, 0.1_204, 0.1_245, 0.1_858, 0.1_338, 0.2_095] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
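def _seeded_generator_sketch() -> None:
    # Sketch of the seeded-generator pattern used by get_inputs() /
    # get_fixed_latents() above: re-seeding a torch.Generator reproduces the
    # exact same random tensor, which is what makes the slice checks in these
    # tests deterministic.
    gen_a = torch.Generator(device="cpu").manual_seed(0)
    gen_b = torch.Generator(device="cpu").manual_seed(0)
    assert torch.equal(torch.randn(2, 3, generator=gen_a), torch.randn(2, 3, generator=gen_b))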
| 243
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase):
def __lowerCamelCase ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowerCamelCase ( self ) -> Any:
torch.manual_seed(0 )
__UpperCamelCase = UNetaDModel(
sample_size=(3_2, 6_4) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return model
@property
def __lowerCamelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
__UpperCamelCase = UNetaDConditionModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , cross_attention_dim=1_0 , )
return model
@property
def __lowerCamelCase ( self ) -> Optional[Any]:
torch.manual_seed(0 )
__UpperCamelCase = AutoencoderKL(
sample_size=(1_2_8, 6_4) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""DownEncoderBlock2D""", """DownEncoderBlock2D""") , up_block_types=("""UpDecoderBlock2D""", """UpDecoderBlock2D""") , )
__UpperCamelCase = UNetaDModel(
sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=("""AttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """AttnUpBlock2D""") , )
return vqvae, unet
@slow
def __lowerCamelCase ( self ) -> Any:
__UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCamelCase = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
__UpperCamelCase = DDPMScheduler()
__UpperCamelCase = AudioDiffusionPipeline(vqvae=lowercase , unet=self.dummy_unet , mel=lowercase , scheduler=lowercase )
__UpperCamelCase = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(4_2 )
__UpperCamelCase = pipe(generator=lowercase , steps=4 )
__UpperCamelCase = output.audios[0]
__UpperCamelCase = output.images[0]
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(4_2 )
__UpperCamelCase = pipe(generator=lowercase , steps=4 , return_dict=lowercase )
__UpperCamelCase = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
__UpperCamelCase = np.frombuffer(image_from_tuple.tobytes() , dtype="""uint8""" )[:1_0]
__UpperCamelCase = np.array([6_9, 2_5_5, 2_5_5, 2_5_5, 0, 0, 7_7, 1_8_1, 1_2, 1_2_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
__UpperCamelCase = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
__UpperCamelCase = DDIMScheduler()
__UpperCamelCase = self.dummy_vqvae_and_unet
__UpperCamelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=lowercase , scheduler=lowercase )
__UpperCamelCase = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
np.random.seed(0 )
__UpperCamelCase = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(4_2 )
__UpperCamelCase = pipe(raw_audio=lowercase , generator=lowercase , start_step=5 , steps=1_0 )
__UpperCamelCase = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
__UpperCamelCase = np.array([1_2_0, 1_1_7, 1_1_0, 1_0_9, 1_3_8, 1_6_7, 1_3_8, 1_4_8, 1_3_2, 1_2_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
__UpperCamelCase = self.dummy_unet_condition
__UpperCamelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=lowercase , mel=lowercase , scheduler=lowercase )
__UpperCamelCase = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
np.random.seed(0 )
__UpperCamelCase = torch.rand((1, 1, 1_0) )
__UpperCamelCase = pipe(generator=lowercase , encoding=lowercase )
__UpperCamelCase = output.images[0]
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
__UpperCamelCase = np.array([1_0_7, 1_0_3, 1_2_0, 1_2_7, 1_4_2, 1_2_2, 1_1_3, 1_2_2, 9_7, 1_1_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase):
def __lowerCamelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ) -> str:
__UpperCamelCase = torch_device
__UpperCamelCase = DiffusionPipeline.from_pretrained("""teticio/audio-diffusion-ddim-256""" )
__UpperCamelCase = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
__UpperCamelCase = torch.Generator(device=lowercase ).manual_seed(4_2 )
__UpperCamelCase = pipe(generator=lowercase )
__UpperCamelCase = output.audios[0]
__UpperCamelCase = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
__UpperCamelCase = np.frombuffer(image.tobytes() , dtype="""uint8""" )[:1_0]
__UpperCamelCase = np.array([1_5_1, 1_6_7, 1_5_4, 1_4_4, 1_2_2, 1_3_4, 1_2_1, 1_0_5, 7_0, 2_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
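def _image_bytes_sketch() -> None:
    # Sketch of the byte-level comparison used in these tests: an 8-bit
    # grayscale PIL image is viewed as a flat uint8 buffer and a slice of it
    # is compared elementwise against an expected array.
    from PIL import Image

    img = Image.fromarray(np.arange(64, dtype=np.uint8).reshape(8, 8))
    first_ten = np.frombuffer(img.tobytes(), dtype="uint8")[:10]
    assert (first_ten == np.arange(10, dtype=np.uint8)).all()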
| 243
| 1
|
"""simple docstring"""
def factorial(num: int) -> int:
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
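def _solution_sketch() -> None:
    # Sanity check for the helpers above: 10! = 3628800 and
    # 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.
    assert solution(10) == 27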
| 16
|
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __A :
'''simple docstring'''
def __init__( self : Optional[int] ,_snake_case : Optional[Any] ,_snake_case : Union[str, Any]=13 ,_snake_case : Any=32 ,_snake_case : int=2 ,_snake_case : str=3 ,_snake_case : Optional[Any]=16 ,_snake_case : List[Any]=[1, 2, 1] ,_snake_case : Dict=[2, 2, 4] ,_snake_case : List[Any]=2 ,_snake_case : Any=2.0 ,_snake_case : Optional[int]=True ,_snake_case : Optional[int]=0.0 ,_snake_case : Union[str, Any]=0.0 ,_snake_case : str=0.1 ,_snake_case : List[Any]="gelu" ,_snake_case : Tuple=False ,_snake_case : Optional[int]=True ,_snake_case : str=0.02 ,_snake_case : List[str]=1e-5 ,_snake_case : int=True ,_snake_case : Dict=None ,_snake_case : str=True ,_snake_case : List[Any]=10 ,_snake_case : Any=8 ,) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : Dict = parent
lowercase__ : Any = batch_size
lowercase__ : Union[str, Any] = image_size
lowercase__ : Dict = patch_size
lowercase__ : int = num_channels
lowercase__ : Any = embed_dim
lowercase__ : int = depths
lowercase__ : Dict = num_heads
lowercase__ : List[Any] = window_size
lowercase__ : int = mlp_ratio
lowercase__ : Optional[int] = qkv_bias
lowercase__ : str = hidden_dropout_prob
lowercase__ : List[Any] = attention_probs_dropout_prob
lowercase__ : Dict = drop_path_rate
lowercase__ : int = hidden_act
lowercase__ : Tuple = use_absolute_embeddings
lowercase__ : Tuple = patch_norm
lowercase__ : Tuple = layer_norm_eps
lowercase__ : Optional[Any] = initializer_range
lowercase__ : int = is_training
lowercase__ : Optional[int] = scope
lowercase__ : str = use_labels
lowercase__ : Dict = type_sequence_label_size
lowercase__ : Union[str, Any] = encoder_stride
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ : Optional[Any] = None
if self.use_labels:
lowercase__ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowercase__ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def UpperCAmelCase ( self : str ,_snake_case : Dict ,_snake_case : List[str] ,_snake_case : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Any = SwinvaModel(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case )
lowercase__ : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase ( self : Union[str, Any] ,_snake_case : List[str] ,_snake_case : Optional[Any] ,_snake_case : int ) -> Any:
"""simple docstring"""
lowercase__ : Union[str, Any] = SwinvaForMaskedImageModeling(config=_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Tuple = model(_snake_case )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ : Optional[int] = 1
lowercase__ : List[Any] = SwinvaForMaskedImageModeling(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ : str = model(_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase ( self : str ,_snake_case : str ,_snake_case : str ,_snake_case : Tuple ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = self.type_sequence_label_size
lowercase__ : Dict = SwinvaForImageClassification(_snake_case )
model.to(_snake_case )
model.eval()
lowercase__ : str = model(_snake_case ,labels=_snake_case )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
lowercase__ : Optional[int] = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = config_and_inputs
lowercase__ : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __A ( A_ ,A_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Union[str, Any] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowerCAmelCase : Optional[int] = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Dict = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Any = False
def UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ : Optional[Any] = SwinvaModelTester(self )
lowercase__ : List[str] = ConfigTester(self ,config_class=_snake_case ,embed_dim=37 )
def UpperCAmelCase ( self : int ) -> Any:
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
lowercase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def UpperCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
pass
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : List[Any] = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowercase__ : str = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case ,nn.Linear ) )
def UpperCAmelCase ( self : int ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ : str = model_class(_snake_case )
lowercase__ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ : Optional[Any] = [*signature.parameters.keys()]
lowercase__ : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> Any:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = True
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = True
lowercase__ : str = False
lowercase__ : Union[str, Any] = True
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : str = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Dict = outputs.attentions
lowercase__ : Any = len(self.model_tester.depths )
self.assertEqual(len(_snake_case ) ,_snake_case )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase__ : List[Any] = True
lowercase__ : Optional[Any] = config.window_size**2
lowercase__ : Any = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : List[str] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(_snake_case ) ,_snake_case )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
lowercase__ : Optional[Any] = len(_snake_case )
# Check attention is always last and order is fine
lowercase__ : Optional[int] = True
lowercase__ : Tuple = True
lowercase__ : Optional[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : Optional[Any] = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
if hasattr(self.model_tester ,'''num_hidden_states_types''' ):
lowercase__ : int = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowercase__ : List[str] = 2
self.assertEqual(out_len + added_hidden_states ,len(_snake_case ) )
lowercase__ : Optional[int] = outputs.attentions
self.assertEqual(len(_snake_case ) ,_snake_case )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def UpperCAmelCase ( self : List[str] ,_snake_case : int ,_snake_case : List[str] ,_snake_case : Optional[int] ,_snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ : List[Any] = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
lowercase__ : int = model(**self._prepare_for_class(_snake_case ,_snake_case ) )
lowercase__ : Optional[int] = outputs.hidden_states
lowercase__ : List[Any] = getattr(
self.model_tester ,'''expected_num_hidden_layers''' ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_snake_case ) ,_snake_case )
# Swinv2 has a different seq_length
lowercase__ : Dict = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
lowercase__ : Tuple = outputs.reshaped_hidden_states
self.assertEqual(len(_snake_case ) ,_snake_case )
lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[str] = reshaped_hidden_states[0].shape
lowercase__ : int = (
reshaped_hidden_states[0].view(_snake_case ,_snake_case ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def UpperCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowercase__ : List[str] = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : str = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,_snake_case )
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : List[Any] = 3
lowercase__ : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ : Optional[int] = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ : Dict = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowercase__ : str = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ : Dict = True
self.check_hidden_states_output(_snake_case ,_snake_case ,_snake_case ,(padded_height, padded_width) )
def UpperCAmelCase ( self : Tuple ) -> List[Any]:
"""simple docstring"""
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_snake_case )
def UpperCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_snake_case )
@slow
def UpperCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ : Union[str, Any] = SwinvaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ : Tuple = _config_zero_init(_snake_case )
for model_class in self.all_model_classes:
lowercase__ : Optional[int] = model_class(config=_snake_case )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@require_vision
@require_torch
class __A ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase__ : str = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
_snake_case )
lowercase__ : Union[str, Any] = self.default_image_processor
lowercase__ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowercase__ : Dict = image_processor(images=_snake_case ,return_tensors='''pt''' ).to(_snake_case )
# forward pass
with torch.no_grad():
lowercase__ : Optional[Any] = model(**_snake_case )
# verify the logits
lowercase__ : str = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape ,_snake_case )
lowercase__ : Dict = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_snake_case ,atol=1e-4 ) )
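def _swin_seq_len_sketch() -> None:
    # Arithmetic behind expected_seq_len in create_and_check_model() above:
    # a 32x32 image with patch size 2 yields (32 // 2) ** 2 = 256 tokens, and
    # each of the len(depths) - 1 = 2 patch-merging steps (depths [1, 2, 1])
    # divides the token count by 4, leaving 16.
    image_size, patch_size, depths = 32, 2, [1, 2, 1]
    expected_seq_len = ((image_size // patch_size) ** 2) // (4 ** (len(depths) - 1))
    assert expected_seq_len == 16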
| 16
| 1
|
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
a : Optional[Any] = sys.version_info >= (3, 1_0)
def __lowerCamelCase ( _lowercase=None , _lowercase=None ) -> Union[str, Any]:
"""simple docstring"""
return field(default_factory=lambda: default , metadata=_lowercase )
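# Note (illustrative): list_field above wraps dataclasses.field with a
# default_factory so each dataclass instance gets its own fresh list rather
# than sharing one mutable default; dataclasses would reject a bare mutable
# default such as [].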
@dataclass
class UpperCamelCase_ :
lowercase = 42
lowercase = 42
lowercase = 42
lowercase = 42
@dataclass
class UpperCamelCase_ :
lowercase = 42
lowercase = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class UpperCamelCase_ :
lowercase = False
lowercase = True
lowercase = None
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'titi'
lowercase = 'toto'
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'titi'
lowercase = 'toto'
lowercase = 42
@dataclass
class UpperCamelCase_ :
lowercase = 'toto'
def _lowercase( self ) -> Dict:
UpperCAmelCase : int = BasicEnum(self.foo )
@dataclass
class UpperCamelCase_ :
lowercase = 'toto'
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Any = MixedTypeEnum(self.foo )
@dataclass
class UpperCamelCase_ :
lowercase = None
lowercase = field(default=__magic_name__ , metadata={'help': 'help message'} )
lowercase = None
lowercase = list_field(default=[] )
lowercase = list_field(default=[] )
@dataclass
class UpperCamelCase_ :
lowercase = list_field(default=[] )
lowercase = list_field(default=[1, 2, 3] )
lowercase = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
lowercase = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class UpperCamelCase_ :
lowercase = field()
lowercase = field()
lowercase = field()
def _lowercase( self ) -> Any:
UpperCAmelCase : Dict = BasicEnum(self.required_enum )
@dataclass
class UpperCamelCase_ :
lowercase = 42
lowercase = field()
lowercase = None
lowercase = field(default='toto' , metadata={'help': 'help message'} )
lowercase = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class UpperCamelCase_ :
lowercase = False
lowercase = True
lowercase = None
@dataclass
class UpperCamelCase_ :
lowercase = None
lowercase = field(default=__magic_name__ , metadata={'help': 'help message'} )
lowercase = None
lowercase = list_field(default=[] )
lowercase = list_field(default=[] )
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self , A , A ) -> List[str]:
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
            xx = {k: v for k, v in vars(x).items() if k != "container"}
            yy = {k: v for k, v in vars(y).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("""choices""" , A ) and yy.get("""choices""" , A ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["""type"""](A ) , yy["""type"""](A ) )
del xx["type"], yy["type"]
self.assertEqual(A , A )
def _lowercase( self ) -> Dict:
UpperCAmelCase : Any = HfArgumentParser(A )
UpperCAmelCase : str = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=A , required=A )
expected.add_argument("""--bar""" , type=A , required=A )
expected.add_argument("""--baz""" , type=A , required=A )
expected.add_argument("""--flag""" , type=A , default=A , const=A , nargs="""?""" )
self.argparsersEqual(A , A )
        args = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
        (example,) = parser.parse_args_into_dataclasses(args, look_for_args_file=False)
        self.assertFalse(example.flag)
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Tuple = HfArgumentParser(A )
UpperCAmelCase : str = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=42 , type=A )
expected.add_argument("""--baz""" , default="""toto""" , type=A , help="""help message""" )
self.argparsersEqual(A , A )
def _lowercase( self ) -> Any:
UpperCAmelCase : Tuple = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=A , default=A , const=A , nargs="""?""" )
expected.add_argument("""--baz""" , type=A , default=A , const=A , nargs="""?""" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("""--no_baz""" , action="""store_false""" , default=A , dest="""baz""" )
expected.add_argument("""--opt""" , type=A , default=A )
UpperCAmelCase : Union[str, Any] = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(A )
for dataclass_type in dataclass_types:
UpperCAmelCase : List[Any] = HfArgumentParser(A )
self.argparsersEqual(A , A )
UpperCAmelCase : Optional[Any] = parser.parse_args([] )
self.assertEqual(A , Namespace(foo=A , baz=A , opt=A ) )
UpperCAmelCase : Tuple = parser.parse_args(["""--foo""", """--no_baz"""] )
self.assertEqual(A , Namespace(foo=A , baz=A , opt=A ) )
UpperCAmelCase : List[Any] = parser.parse_args(["""--foo""", """--baz"""] )
self.assertEqual(A , Namespace(foo=A , baz=A , opt=A ) )
UpperCAmelCase : Dict = parser.parse_args(["""--foo""", """True""", """--baz""", """True""", """--opt""", """True"""] )
self.assertEqual(A , Namespace(foo=A , baz=A , opt=A ) )
UpperCAmelCase : Optional[Any] = parser.parse_args(["""--foo""", """False""", """--baz""", """False""", """--opt""", """False"""] )
self.assertEqual(A , Namespace(foo=A , baz=A , opt=A ) )
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : Any = HfArgumentParser(A )
UpperCAmelCase : Any = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=["""titi""", """toto""", 42] , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(A , A )
UpperCAmelCase : List[Any] = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
UpperCAmelCase : int = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
UpperCAmelCase : Optional[int] = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
UpperCAmelCase : Tuple = parser.parse_args_into_dataclasses(["""--foo""", """titi"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
UpperCAmelCase : Any = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
UpperCAmelCase : str = parser.parse_args_into_dataclasses(["""--foo""", """42"""] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def _lowercase( self ) -> Optional[int]:
@dataclass
class UpperCamelCase_ :
lowercase = 'toto'
UpperCAmelCase : int = HfArgumentParser(A )
UpperCAmelCase : Dict = argparse.ArgumentParser()
expected.add_argument(
"""--foo""" , default="""toto""" , choices=("""titi""", """toto""", 42) , type=make_choice_type_function(["""titi""", """toto""", 42] ) , )
self.argparsersEqual(A , A )
UpperCAmelCase : Tuple = parser.parse_args([] )
self.assertEqual(args.foo , """toto""" )
UpperCAmelCase : Optional[int] = parser.parse_args(["""--foo""", """titi"""] )
self.assertEqual(args.foo , """titi""" )
UpperCAmelCase : int = parser.parse_args(["""--foo""", """42"""] )
self.assertEqual(args.foo , 42 )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : int = HfArgumentParser(A )
UpperCAmelCase : Any = argparse.ArgumentParser()
expected.add_argument("""--foo_int""" , nargs="""+""" , default=[] , type=A )
expected.add_argument("""--bar_int""" , nargs="""+""" , default=[1, 2, 3] , type=A )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=A )
expected.add_argument("""--foo_float""" , nargs="""+""" , default=[0.1, 0.2, 0.3] , type=A )
self.argparsersEqual(A , A )
UpperCAmelCase : int = parser.parse_args([] )
self.assertEqual(
A , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["""Hallo""", """Bonjour""", """Hello"""] , foo_float=[0.1, 0.2, 0.3] ) , )
UpperCAmelCase : Optional[int] = parser.parse_args("""--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7""".split() )
self.assertEqual(A , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["""a""", """b""", """c"""] , foo_float=[0.1, 0.7] ) )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : Any = argparse.ArgumentParser()
expected.add_argument("""--foo""" , default=A , type=A )
expected.add_argument("""--bar""" , default=A , type=A , help="""help message""" )
expected.add_argument("""--baz""" , default=A , type=A )
expected.add_argument("""--ces""" , nargs="""+""" , default=[] , type=A )
expected.add_argument("""--des""" , nargs="""+""" , default=[] , type=A )
UpperCAmelCase : Optional[Any] = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(A )
for dataclass_type in dataclass_types:
UpperCAmelCase : Optional[Any] = HfArgumentParser(A )
self.argparsersEqual(A , A )
UpperCAmelCase : List[Any] = parser.parse_args([] )
self.assertEqual(A , Namespace(foo=A , bar=A , baz=A , ces=[] , des=[] ) )
UpperCAmelCase : Tuple = parser.parse_args("""--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3""".split() )
self.assertEqual(A , Namespace(foo=12 , bar=3.1_4 , baz="""42""" , ces=["""a""", """b""", """c"""] , des=[1, 2, 3] ) )
def _lowercase( self ) -> str:
UpperCAmelCase : Tuple = HfArgumentParser(A )
UpperCAmelCase : List[str] = argparse.ArgumentParser()
expected.add_argument("""--required_list""" , nargs="""+""" , type=A , required=A )
expected.add_argument("""--required_str""" , type=A , required=A )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=A , )
self.argparsersEqual(A , A )
def _lowercase( self ) -> List[str]:
UpperCAmelCase : Optional[int] = HfArgumentParser(A )
UpperCAmelCase : List[Any] = argparse.ArgumentParser()
expected.add_argument("""--foo""" , type=A , required=A )
expected.add_argument(
"""--required_enum""" , type=make_choice_type_function(["""titi""", """toto"""] ) , choices=["""titi""", """toto"""] , required=A , )
expected.add_argument("""--opt""" , type=A , default=A )
expected.add_argument("""--baz""" , default="""toto""" , type=A , help="""help message""" )
expected.add_argument("""--foo_str""" , nargs="""+""" , default=["""Hallo""", """Bonjour""", """Hello"""] , type=A )
self.argparsersEqual(A , A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Optional[int] = HfArgumentParser(A )
UpperCAmelCase : Dict = {
"""foo""": 12,
"""bar""": 3.1_4,
"""baz""": """42""",
"""flag""": True,
}
UpperCAmelCase : List[Any] = parser.parse_dict(A )[0]
UpperCAmelCase : Optional[int] = BasicExample(**A )
self.assertEqual(A , A )
def _lowercase( self ) -> int:
UpperCAmelCase : str = HfArgumentParser(A )
UpperCAmelCase : int = {
"""foo""": 12,
"""bar""": 3.1_4,
"""baz""": """42""",
"""flag""": True,
"""extra""": 42,
}
self.assertRaises(A , parser.parse_dict , A , allow_extra_keys=A )
def _lowercase( self ) -> Any:
UpperCAmelCase : Union[str, Any] = HfArgumentParser(A )
UpperCAmelCase : Any = {
"""foo""": 12,
"""bar""": 3.1_4,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase : List[Any] = os.path.join(A , """temp_json""" )
os.mkdir(A )
with open(temp_local_path + """.json""" , """w+""" ) as f:
json.dump(A , A )
            UpperCAmelCase : Union[str, Any] = parser.parse_json_file(Path(temp_local_path + """.json""" ) )[0]
UpperCAmelCase : List[str] = BasicExample(**A )
self.assertEqual(A , A )
def _lowercase( self ) -> int:
UpperCAmelCase : Tuple = HfArgumentParser(A )
UpperCAmelCase : int = {
"""foo""": 12,
"""bar""": 3.1_4,
"""baz""": """42""",
"""flag""": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase : Optional[Any] = os.path.join(A , """temp_yaml""" )
os.mkdir(A )
with open(temp_local_path + """.yaml""" , """w+""" ) as f:
yaml.dump(A , A )
UpperCAmelCase : List[Any] = parser.parse_yaml_file(Path(temp_local_path + """.yaml""" ) )[0]
UpperCAmelCase : Optional[Any] = BasicExample(**A )
self.assertEqual(A , A )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Union[str, Any] = HfArgumentParser(A )
self.assertIsNotNone(A )
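# Hedged usage sketch (separate from the test cases above): HfArgumentParser
# turns a dataclass into an argparse-style CLI. The dataclass and flag values
# below are made up for illustration.
if __name__ == "__main__":
    from dataclasses import dataclass, field

    @dataclass
    class DemoArgs:
        foo: int = 42
        baz: str = field(default="toto" , metadata={"help": "help message"} )

    demo_parser = HfArgumentParser(DemoArgs )
    (demo_args,) = demo_parser.parse_args_into_dataclasses(["--foo", "7"] )
    assert demo_args.foo == 7 and demo_args.baz == "toto"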
| 364
|
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a : Union[str, Any] = logging.get_logger(__name__)
a : Union[str, Any] = {
"""facebook/detr-resnet-50""": """https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json""",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class UpperCamelCase_ ( __magic_name__ ):
lowercase = 'detr'
lowercase = ['past_key_values']
lowercase = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , A=True , A=None , A=3 , A=100 , A=6 , A=2048 , A=8 , A=6 , A=2048 , A=8 , A=0.0 , A=0.0 , A=True , A="relu" , A=256 , A=0.1 , A=0.0 , A=0.0 , A=0.0_2 , A=1.0 , A=False , A="sine" , A="resnet50" , A=True , A=False , A=1 , A=5 , A=2 , A=1 , A=1 , A=5 , A=2 , A=0.1 , **A , ) -> List[str]:
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
UpperCAmelCase : Optional[Any] = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(A , A ):
UpperCAmelCase : Any = backbone_config.get("""model_type""" )
UpperCAmelCase : int = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase : List[Any] = config_class.from_dict(A )
# set timm attributes to None
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = None, None, None
UpperCAmelCase : Dict = use_timm_backbone
UpperCAmelCase : Any = backbone_config
UpperCAmelCase : List[Any] = num_channels
UpperCAmelCase : int = num_queries
UpperCAmelCase : List[str] = d_model
UpperCAmelCase : Tuple = encoder_ffn_dim
UpperCAmelCase : Optional[Any] = encoder_layers
UpperCAmelCase : Any = encoder_attention_heads
UpperCAmelCase : Optional[Any] = decoder_ffn_dim
UpperCAmelCase : Optional[int] = decoder_layers
UpperCAmelCase : Any = decoder_attention_heads
UpperCAmelCase : str = dropout
UpperCAmelCase : Tuple = attention_dropout
UpperCAmelCase : Dict = activation_dropout
UpperCAmelCase : Tuple = activation_function
UpperCAmelCase : List[Any] = init_std
UpperCAmelCase : str = init_xavier_std
UpperCAmelCase : List[Any] = encoder_layerdrop
UpperCAmelCase : int = decoder_layerdrop
UpperCAmelCase : List[Any] = encoder_layers
UpperCAmelCase : Union[str, Any] = auxiliary_loss
UpperCAmelCase : str = position_embedding_type
UpperCAmelCase : Union[str, Any] = backbone
UpperCAmelCase : List[str] = use_pretrained_backbone
UpperCAmelCase : Optional[int] = dilation
# Hungarian matcher
UpperCAmelCase : Union[str, Any] = class_cost
UpperCAmelCase : Optional[Any] = bbox_cost
UpperCAmelCase : List[Any] = giou_cost
# Loss coefficients
UpperCAmelCase : int = mask_loss_coefficient
UpperCAmelCase : Optional[int] = dice_loss_coefficient
UpperCAmelCase : Dict = bbox_loss_coefficient
UpperCAmelCase : Any = giou_loss_coefficient
UpperCAmelCase : Any = eos_coefficient
super().__init__(is_encoder_decoder=A , **A )
@property
def _lowercase( self ) -> int:
return self.encoder_attention_heads
@property
def _lowercase( self ) -> int:
return self.d_model
@classmethod
def _lowercase( cls , A , **A ) -> Dict:
return cls(backbone_config=A , **A )
def _lowercase( self ) -> Dict[str, any]:
UpperCAmelCase : Any = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
UpperCAmelCase : Any = self.backbone_config.to_dict()
UpperCAmelCase : Optional[Any] = self.__class__.model_type
return output
class UpperCamelCase_ ( __magic_name__ ):
lowercase = version.parse('1.11' )
@property
def _lowercase( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""pixel_mask""", {0: """batch"""}),
] )
@property
def _lowercase( self ) -> float:
return 1e-5
@property
def _lowercase( self ) -> int:
return 12
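# Hedged usage sketch (not part of the module itself): via its public name
# DetrConfig, the attribute_map above makes `hidden_size` resolve to `d_model`
# and `num_attention_heads` to `encoder_attention_heads`.
if __name__ == "__main__":
    from transformers import DetrConfig

    demo_config = DetrConfig(d_model=128 , encoder_attention_heads=4 )
    assert demo_config.hidden_size == 128
    assert demo_config.num_attention_heads == 4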
| 338
| 0
|
'''simple docstring'''
def lucas_lehmer_test(p: int) -> bool:
    if p < 2:
        raise ValueError("p should not be less than 2!" )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
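    # Illustrative extension (not in the original file): keep the prime
    # exponents p < 20 whose Mersenne number 2**p - 1 passes the test.
    # Expected output: [2, 3, 5, 7, 13, 17, 19] (p = 11 fails: 2047 = 23 * 89).
    print([p for p in (2, 3, 5, 7, 11, 13, 17, 19) if lucas_lehmer_test(p)])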
| 234
|
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict ):
    ignore_keys = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict , expert_idx=None ):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0" , F"""ffn.experts.expert_{expert_idx}""" )
            else:
                key = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2." , ".ffn.fc2." )
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1." , ".ffn.fc1." )
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn." , ".cross_attention." )
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm" , "ff_layer_norm" )
        new_dict[key] = state_dict[old_key]
return new_dict
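def _demo_rename_fairseq_keys():
    # Hedged illustration, never called by the conversion flow: the toy keys
    # below are invented to show how expert and router weights are rewritten.
    toy_state = {
        "layers.0.moe_layer.experts.0.fc1.weight": None,
        "layers.0.moe_layer.gate.wg.weight": None,
    }
    renamed = rename_fairseq_keys(toy_state , 3 )
    assert "layers.0.ffn.experts.expert_3.fc1.weight" in renamed
    assert "layers.0.ffn.router.classifier.weight" in renamed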
def shard_on_the_fly(switch_checkpoint_path , dump_path , num_experts , dtype , weights_name = WEIGHTS_NAME ):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    for expert in range(num_experts ):
        expert_path = switch_checkpoint_path + F"""-rank-{expert}.pt"""
        if os.path.isfile(expert_path ):
            expert_state = torch.load(expert_path )["model"]
            remove_ignore_keys_(expert_state )
            expert_state = rename_fairseq_keys(expert_state , expert )
            save_path = os.path.join(
                dump_path , weights_name.replace(".bin" , F"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
            torch.save(expert_state , save_path )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(expert_state )[0]].dtype )
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace(".bin" , F"""-{len(sharded_state_dicts )+1:05d}-of-???.bin""" ) )
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
    remove_ignore_keys_(shared_weights )
    shared_weights = rename_fairseq_keys(shared_weights , None )
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts ) == 1:
        save_path = os.path.join(dump_path , weights_name )
        torch.save(shared_weights , save_path )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights , save_path )
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(".bin" , F"""-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin""" )
        temp_filename = os.path.join(dump_path , weights_name.replace(".bin" , F"""-{idx+1:05d}-of-???.bin""" ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , "w" , encoding="utf-8" ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + "\n"
        f.write(content )
return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--nllb_moe_checkpoint_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--dtype', default='float32', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b',
type=str,
required=False,
help='Path to the output pytorch model.',
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
'facebook/nllb-200-3.3B', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('Done')
model.save_pretrained(args.pytorch_dump_folder_path)
| 234
| 1
|
def is_ip_va_address_valid(ip_va_address: str ) -> bool:
    '''simple docstring'''
    octets = [int(i ) for i in ip_va_address.split('.' ) if i.isdigit()]
    # a dotted-quad address needs exactly four numeric octets, each in 0..255
    return len(octets ) == 4 and all(0 <= octet <= 255 for octet in octets )
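# Illustrative checks (not in the original file): exactly four numeric octets,
# each in the range 0..255, are required.
assert is_ip_va_address_valid("192.168.0.1")
assert is_ip_va_address_valid("255.255.255.255")
assert not is_ip_va_address_valid("192.168.0.256")
assert not is_ip_va_address_valid("1.2.3")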
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = "valid" if is_ip_va_address_valid(ip ) else "invalid"
print(f"""{ip} is a {valid_or_invalid} IP v4 address.""")
| 357
|
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowerCAmelCase ( A ):
def _lowerCamelCase ( self : List[str]) -> int:
"""simple docstring"""
_UpperCAmelCase = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(A , 'hidden_sizes'))
self.parent.assertTrue(hasattr(A , 'neck_hidden_sizes'))
self.parent.assertTrue(hasattr(A , 'num_attention_heads'))
class __lowerCAmelCase :
def __init__( self : int , A : Tuple , A : List[str]=13 , A : List[str]=32 , A : List[str]=2 , A : List[str]=3 , A : List[Any]=6_40 , A : Any=4 , A : int="silu" , A : int=3 , A : Dict=32 , A : List[Any]=0.1 , A : Optional[Any]=0.1 , A : Optional[int]=0.1 , A : List[str]=0.0_2 , A : int=True , A : Any=True , A : List[str]=10 , A : Tuple=None , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = last_hidden_size
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = conv_kernel_size
_UpperCAmelCase = output_stride
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = classifier_dropout_prob
_UpperCAmelCase = use_labels
_UpperCAmelCase = is_training
_UpperCAmelCase = num_labels
_UpperCAmelCase = initializer_range
_UpperCAmelCase = scope
def _lowerCamelCase ( self : Union[str, Any]) -> Any:
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels)
_UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowerCamelCase ( self : str) -> int:
"""simple docstring"""
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self : List[Any] , A : Dict , A : Tuple , A : int , A : Tuple) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = MobileViTModel(config=A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A)
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _lowerCamelCase ( self : int , A : Any , A : List[Any] , A : List[Any] , A : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = MobileViTForImageClassification(A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A , labels=A)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _lowerCamelCase ( self : int , A : Tuple , A : Optional[Any] , A : Union[str, Any] , A : List[Any]) -> int:
"""simple docstring"""
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = MobileViTForSemanticSegmentation(A)
model.to(A)
model.eval()
_UpperCAmelCase = model(A)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_UpperCAmelCase = model(A , labels=A)
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _lowerCamelCase ( self : int) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( A , A , unittest.TestCase ):
UpperCamelCase = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase = (
{
'''feature-extraction''': MobileViTModel,
'''image-classification''': MobileViTForImageClassification,
'''image-segmentation''': MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def _lowerCamelCase ( self : str) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = MobileViTModelTester(self)
_UpperCAmelCase = MobileViTConfigTester(self , config_class=A , has_text_modality=A)
def _lowerCamelCase ( self : Optional[int]) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileViT does not use inputs_embeds')
def _lowerCamelCase ( self : Tuple) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='MobileViT does not support input and output embeddings')
def _lowerCamelCase ( self : Optional[Any]) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason='MobileViT does not output attentions')
def _lowerCamelCase ( self : Any) -> Optional[Any]:
"""simple docstring"""
pass
def _lowerCamelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(A)
_UpperCAmelCase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , A)
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def _lowerCamelCase ( self : Union[str, Any]) -> List[str]:
"""simple docstring"""
pass
def _lowerCamelCase ( self : Tuple) -> str:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A)
def _lowerCamelCase ( self : Optional[int]) -> Any:
"""simple docstring"""
def check_hidden_states_output(A : List[str] , A : Union[str, Any] , A : int):
_UpperCAmelCase = model_class(A)
model.to(A)
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(A , A))
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = 5
self.assertEqual(len(A) , A)
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_UpperCAmelCase = 2
for i in range(len(A)):
self.assertListEqual(
list(hidden_states[i].shape[-2:]) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2)
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(A , A , A)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(A , A , A)
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A)
def _lowerCamelCase ( self : int) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A)
@slow
def _lowerCamelCase ( self : Optional[int]) -> Union[str, Any]:
"""simple docstring"""
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = MobileViTModel.from_pretrained(A)
self.assertIsNotNone(A)
def prepare_img():
    '''simple docstring'''
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self : Tuple) -> Dict:
"""simple docstring"""
return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small') if is_vision_available() else None
@slow
def _lowerCamelCase ( self : List[Any]) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small').to(A)
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=A , return_tensors='pt').to(A)
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**A)
# verify the logits
_UpperCAmelCase = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , A)
_UpperCAmelCase = torch.tensor([-1.9_3_6_4, -1.2_3_2_7, -0.4_6_5_3]).to(A)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1E-4))
@slow
def _lowerCamelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
_UpperCAmelCase = model.to(A)
_UpperCAmelCase = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=A , return_tensors='pt').to(A)
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**A)
_UpperCAmelCase = outputs.logits
# verify the logits
_UpperCAmelCase = torch.Size((1, 21, 32, 32))
self.assertEqual(logits.shape , A)
_UpperCAmelCase = torch.tensor(
[
[[6.9_7_1_3, 6.9_7_8_6, 7.2_4_2_2], [7.2_8_9_3, 7.2_8_2_5, 7.4_4_4_6], [7.6_5_8_0, 7.8_7_9_7, 7.9_4_2_0]],
[[-1_0.6_8_6_9, -1_0.3_2_5_0, -1_0.3_4_7_1], [-1_0.4_2_2_8, -9.9_8_6_8, -9.7_1_3_2], [-1_1.0_4_0_5, -1_1.0_2_2_1, -1_0.7_3_1_8]],
[[-3.3_0_8_9, -2.8_5_3_9, -2.6_7_4_0], [-3.2_7_0_6, -2.5_6_2_1, -2.5_1_0_8], [-3.2_5_3_4, -2.6_6_1_5, -2.6_6_5_1]],
] , device=A , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A , atol=1E-4))
@slow
def _lowerCamelCase ( self : List[Any]) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
_UpperCAmelCase = model.to(A)
_UpperCAmelCase = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=A , return_tensors='pt').to(A)
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**A)
_UpperCAmelCase = outputs.logits.detach().cpu()
_UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=A , target_sizes=[(50, 60)])
_UpperCAmelCase = torch.Size((50, 60))
self.assertEqual(segmentation[0].shape , A)
_UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=A)
_UpperCAmelCase = torch.Size((32, 32))
self.assertEqual(segmentation[0].shape , A)
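# Hedged quick-start sketch (separate from the tests above); assumes network
# access to the Hub for the same xx-small checkpoint the tests use.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline('image-classification' , model='apple/mobilevit-xx-small')
    print(classifier(prepare_img())[:2])  # top predictions for the COCO cats fixture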
| 290
| 0
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)
DatasetType = TypeVar('DatasetType', Dataset, IterableDataset)
def interleave_datasets ( datasets , probabilities = None , seed = None , info = None , split = None , stopping_strategy = "first_exhausted" , ):
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.' )
    for i, dataset in enumerate(datasets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    f"""Dataset at position {i} has at least one split: {list(dataset )}\n"""
                    f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']""" )
            raise ValueError(
                f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.""" )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
def concatenate_datasets ( dsets , info = None , split = None , axis = 0 , ):
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.' )
    for i, dataset in enumerate(dsets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        'is an empty dataset dictionary.' )
                raise ValueError(
                    f"""Dataset at position {i} has at least one split: {list(dataset )}\n"""
                    f"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']""" )
            raise ValueError(
                f"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.""" )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                f"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets , info=info , split=split , axis=axis )
    else:
        return _concatenate_iterable_datasets(dsets , info=info , split=split , axis=axis )
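# Hedged usage sketch (not part of the module): tiny in-memory datasets make
# the row order produced by the two combinators visible.
if __name__ == "__main__":
    d1 = Dataset.from_dict({'x': [0, 1, 2]} )
    d2 = Dataset.from_dict({'x': [10, 11, 12]} )
    print(interleave_datasets([d1, d2] )['x'] )   # alternates: [0, 10, 1, 11, 2, 12]
    print(concatenate_datasets([d1, d2] )['x'] )  # appends:    [0, 1, 2, 10, 11, 12]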
| 297
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class a__( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase__ = StableUnCLIPImgaImgPipeline
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowercase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowercase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase__ = frozenset([] )
def lowercase_ ( self : int ):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# image encoding components
a : List[Any] = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
a : Dict = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=__snake_case , projection_dim=__snake_case , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
a : Dict = StableUnCLIPImageNormalizer(embedding_dim=__snake_case )
a : Optional[int] = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
a : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
a : Tuple = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__snake_case , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
a : Union[str, Any] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__snake_case , layers_per_block=1 , upcast_attention=__snake_case , use_linear_projection=__snake_case , )
torch.manual_seed(0 )
a : List[Any] = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.00085 , beta_end=0.012 , prediction_type='v_prediction' , set_alpha_to_one=__snake_case , steps_offset=1 , )
torch.manual_seed(0 )
a : List[str] = AutoencoderKL()
a : str = {
# image encoding components
'feature_extractor': feature_extractor,
'image_encoder': image_encoder.eval(),
# image noising components
'image_normalizer': image_normalizer.eval(),
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder.eval(),
'unet': unet.eval(),
'scheduler': scheduler,
'vae': vae.eval(),
}
return components
def lowercase_ ( self : Tuple , __snake_case : List[str] , __snake_case : Union[str, Any]=0 , __snake_case : Tuple=True ):
if str(__snake_case ).startswith('mps' ):
a : Tuple = torch.manual_seed(__snake_case )
else:
a : List[Any] = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
a : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__snake_case ) ).to(__snake_case )
if pil_image:
a : Optional[Any] = input_image * 0.5 + 0.5
a : Optional[Any] = input_image.clamp(0 , 1 )
a : Dict = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
a : int = DiffusionPipeline.numpy_to_pil(__snake_case )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def lowercase_ ( self : Optional[Any] ):
a : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
a : Union[str, Any] = self.get_dummy_components()
a : Any = StableUnCLIPImgaImgPipeline(**__snake_case )
a : Tuple = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
a : Union[str, Any] = self.get_dummy_inputs(__snake_case )
inputs.update({'image_embeds': None} )
a : str = sd_pipe(**__snake_case ).images
a : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
a : Optional[int] = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase_ ( self : List[str] ):
a : int = torch_device in ['cpu', 'mps']
self._test_attention_slicing_forward_pass(test_max_difference=__snake_case )
def lowercase_ ( self : int ):
a : Optional[int] = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=__snake_case )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowercase_ ( self : Dict ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=__snake_case )
@slow
@require_torch_gpu
class a__( unittest.TestCase ):
def lowercase_ ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Optional[Any] ):
a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
a : Union[str, Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy' )
a : Optional[Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-l-img2img' , torch_dtype=torch.floataa )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a : Optional[int] = torch.Generator(device='cpu' ).manual_seed(0 )
a : Optional[int] = pipe(__snake_case , 'anime turle' , generator=__snake_case , output_type='np' )
a : List[str] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
def lowercase_ ( self : Optional[int] ):
a : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
a : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy' )
a : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
a : str = pipe(__snake_case , 'anime turle' , generator=__snake_case , output_type='np' )
a : List[str] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
def lowercase_ ( self : Any ):
a : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png' )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
a : List[str] = StableUnCLIPImgaImgPipeline.from_pretrained(
'fusing/stable-unclip-2-1-h-img2img' , torch_dtype=torch.floataa )
a : Optional[Any] = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
a : Optional[int] = pipe(
__snake_case , 'anime turtle' , num_inference_steps=2 , output_type='np' , )
a : int = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 297
| 1
|
from math import factorial
def solution(num: int = 1_0_0 ) -> int:
    # sum of the decimal digits of num!
    return sum(int(x ) for x in str(factorial(num ) ) )
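# Sanity check (not in the original file): 10! = 3628800, whose digits sum to 27.
assert solution(10) == 27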
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
| 292
|
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'word_embeddings_layernorm.weight',
'word_embeddings_layernorm.bias',
'input_layernorm.weight',
'input_layernorm.bias',
'post_attention_layernorm.weight',
'post_attention_layernorm.bias',
'self_attention.dense.bias',
'mlp.dense_4h_to_h.bias',
'ln_f.weight',
'ln_f.bias',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'mlp.dense_4h_to_h.weight',
'self_attention.dense.weight',
]
def layer_name_mapping(key , file ) -> str:
    layer_rename_map = {
'word_embeddings.weight': 'word_embeddings.weight',
'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
'weight': 'ln_f.weight',
'bias': 'ln_f.bias',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
    layer_number = int(re.match(R'.*layer_(\d*).*' , file )[1] )
layer_number -= 3
return F'''h.{layer_number}.''' + key
def get_dtype_size(dtype ) -> float:
    # bytes per element for `dtype`; booleans pack to 1/8 byte
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(R'[^\d](\d+)$' , str(dtype ) )
    if bit_search is None:
        raise ValueError(F'''`dtype` is not a valid dtype: {dtype}.''' )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
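def _demo_get_dtype_size():
    # Hedged illustration, never called by the conversion flow: expected
    # per-element sizes are float32 -> 4 bytes, float16 -> 2, bool -> 1/8.
    assert get_dtype_size(torch.float32 ) == 4
    assert get_dtype_size(torch.float16 ) == 2
    assert get_dtype_size(torch.bool ) == 0.125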
def convert_bloom_checkpoint_to_pytorch(bloom_checkpoint_path , bloom_config_file , pytorch_dump_folder_path , shard_model , pretraining_tp ) -> None:
# Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file )
    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path )
        file_names = sorted(filter(lambda s : s.startswith('layer' ) and "model_00" in s , file_names ) )
        index_dict = {'weight_map': {}, 'metadata': {}}
        total_size = 0
        missing_keys = None
        config = BloomConfig()
        for j, file in enumerate(file_names ):
            print('Processing file: {}'.format(file ) )
            tensors = None
            for i in range(pretraining_tp ):
                # load all TP files
                f_name = file.replace('model_00' , F'''model_0{i}''' )
                temp = torch.load(os.path.join(bloom_checkpoint_path , f_name ) , map_location='cpu' )
# Rename keys in the transformers names
                keys = list(temp.keys() )
                for key in keys:
                    temp[layer_name_mapping(key , file )] = temp.pop(key )
                if tensors is None:
                    tensors = temp
else:
for key in tensors.keys():
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]] , dim=cat_dim )
            # Divide the weights we want to average by the number of TP ranks
            for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors , os.path.join(
                    pytorch_dump_folder_path , 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) , str(len(file_names ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
                value = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = 'pytorch_model_{}-of-{}.bin'.format(
                        str(j + 1 ).zfill(5 ) , str(len(file_names ) ).zfill(5 ) )
    config = BloomConfig()
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    index_dict["metadata"]["total_size"] = total_size
    with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
        f.write(config.to_json_string() )
    with open(os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME + '.index.json' ) , 'w' , encoding='utf-8' ) as f:
        json_config = json.dumps(index_dict , indent=2 , sort_keys=True ) + '\n'
        f.write(json_config )
else:
        model = BloomModel(config )
        file_names = os.listdir(bloom_checkpoint_path )
        file_names = sorted(filter(lambda s : s.startswith('layer' ) and "model_00" in s , file_names ) )
        missing_keys = None
        for _, file in enumerate(file_names ):
            tensors = None
            for i in range(pretraining_tp ):
# load all TP files
                f_name = file.replace('model_00' , F'''model_0{i}''' )
                temp = torch.load(os.path.join(bloom_checkpoint_path , f_name ) , map_location='cpu' )
# Rename keys in the transformers names
                keys = list(temp.keys() )
                for key in keys:
                    temp[layer_name_mapping(key , file )] = temp.pop(key )
                if tensors is None:
                    tensors = temp
else:
for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]] , dim=cat_dim )
            # Divide the weights we want to average by the number of TP ranks
            for key in tensors.keys():
                if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
                    tensors[key] = tensors[key] / pretraining_tp
            other_keys = model.load_state_dict(tensors , strict=False )
            assert not other_keys.unexpected_keys, F'''The keys {other_keys.unexpected_keys} are unexpected'''
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys )
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F'''The keys {missing_keys} are missing'''
# Save pytorch-model
        os.makedirs(pytorch_dump_folder_path , exist_ok=True )
        pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
        print(F'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
print(F'''Save configuration file to {pytorch_config_dump_path}''' )
        with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 292
| 1
|
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/config.json',
# See all BART models at https://huggingface.co/models?filter=bart
}
class snake_case ( SCREAMING_SNAKE_CASE_ ):
a_ : Optional[Any] = """bart"""
a_ : str = ["""past_key_values"""]
a_ : str = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , __UpperCAmelCase=5_02_65 , __UpperCAmelCase=10_24 , __UpperCAmelCase=12 , __UpperCAmelCase=40_96 , __UpperCAmelCase=16 , __UpperCAmelCase=12 , __UpperCAmelCase=40_96 , __UpperCAmelCase=16 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase="gelu" , __UpperCAmelCase=10_24 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.02 , __UpperCAmelCase=0.0 , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=0 , __UpperCAmelCase=2 , __UpperCAmelCase=True , __UpperCAmelCase=2 , __UpperCAmelCase=2 , **__UpperCAmelCase , ) ->Optional[Any]:
a_ = vocab_size
a_ = max_position_embeddings
a_ = d_model
a_ = encoder_ffn_dim
a_ = encoder_layers
a_ = encoder_attention_heads
a_ = decoder_ffn_dim
a_ = decoder_layers
a_ = decoder_attention_heads
a_ = dropout
a_ = attention_dropout
a_ = activation_dropout
a_ = activation_function
a_ = init_std
a_ = encoder_layerdrop
a_ = decoder_layerdrop
a_ = classifier_dropout
a_ = use_cache
a_ = encoder_layers
a_ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=__UpperCAmelCase , pad_token_id=__UpperCAmelCase , bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , is_encoder_decoder=__UpperCAmelCase , decoder_start_token_id=__UpperCAmelCase , forced_eos_token_id=__UpperCAmelCase , **__UpperCAmelCase , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , __UpperCAmelCase):
a_ = self.bos_token_id
warnings.warn(
F'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
"The config can simply be saved and uploaded again to be fixed.")
class snake_case ( SCREAMING_SNAKE_CASE_ ):
@property
def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
a_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
])
if self.use_past:
a_ = {0: "batch"}
a_ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
a_ = {0: "batch", 1: "decoder_sequence"}
a_ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__UpperCAmelCase , direction="inputs")
elif self.task == "causal-lm":
# TODO: figure this case out.
a_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
])
if self.use_past:
a_ , a_ = self.num_layers
for i in range(__UpperCAmelCase):
a_ = {0: "batch", 2: "past_sequence + sequence"}
a_ = {0: "batch", 2: "past_sequence + sequence"}
else:
a_ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
])
return common_inputs
@property
def UpperCAmelCase__ ( self) ->Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
a_ = super().outputs
else:
a_ = super(__UpperCAmelCase , self).outputs
if self.use_past:
a_ , a_ = self.num_layers
for i in range(__UpperCAmelCase):
a_ = {0: "batch", 2: "past_sequence + sequence"}
a_ = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = -1 , __UpperCAmelCase = -1 , __UpperCAmelCase = False , __UpperCAmelCase = None , ) ->Mapping[str, Any]:
a_ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
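
# Shape sketch (ours, standalone; names are illustrative, not from the config above):
# with use_past, each decoder layer caches four tensors of shape
# (batch, num_heads, past_len, head_dim) -- self-attention K/V with the +3 padded
# decoder length and cross-attention K/V with the encoder length, mirroring the logic above.
def _demo_past_shapes(batch=2, heads=4, enc_len=8, dec_len=1, hidden=32):
    import torch

    decoder_past_length = dec_len + 3
    encoder_shape = (batch, heads, enc_len, hidden // heads)
    decoder_shape = (batch, heads, decoder_past_length, hidden // heads)
    layer_cache = (
        torch.zeros(decoder_shape),
        torch.zeros(decoder_shape),
        torch.zeros(encoder_shape),
        torch.zeros(encoder_shape),
    )
    return tuple(t.shape for t in layer_cache)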
| 243
|
"""simple docstring"""
def hexagonal_numbers(length: int) -> list[int]:
    """
    Returns the first ``length`` hexagonal numbers, h_n = n * (2n - 1).
    """
    if not isinstance(length, int) or length <= 0:
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
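

# Cross-check sketch (ours, not part of the original): the n-th hexagonal number
# equals the (2n - 1)-th triangular number, so the closed form can be verified
# independently of the list comprehension above.
def _check_hexagonal(limit: int = 100) -> None:
    for n in range(limit):
        triangular = (2 * n - 1) * (2 * n) // 2
        assert n * (2 * n - 1) == triangular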
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 243
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Load the original fairseq state dict and rename/split keys so they match OPT."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd
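

# Illustration of the split above (ours, not part of the conversion script): a fused
# projection of depth 3*d is cut into three d-sized chunks along dim 0, and the
# fairseq layer stores them in K, V, Q order, which is why the split is unpacked that way.
def _demo_qkv_split():
    fused = torch.arange(6 * 4, dtype=torch.float32).reshape(6, 4)  # depth=6 -> d=2
    k, v, q = torch.split(fused, fused.shape[0] // 3, dim=0)
    assert k.shape == v.shape == q.shape == (2, 4)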
@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    """Copy/paste/tweak the fairseq weights into the Hugging Face OPT structure."""
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 360
|
"""simple docstring"""
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: returns the longest palindromic substring in linear time.
    """
    max_length = 0

    # if input_string is "aba" then new_input_string becomes "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"

    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # if this palindrome ends after the previously explored end (that is r),
        # update r to the last index of this palindrome
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
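

# Quadratic cross-check (ours): enumerate all substrings; the *length* of the result
# must match the linear-time answer above (the string itself may differ on ties).
def _longest_palindrome_naive(text: str) -> str:
    best = ""
    for i in range(len(text)):
        for j in range(i, len(text)):
            candidate = text[i : j + 1]
            if candidate == candidate[::-1] and len(candidate) > len(best):
                best = candidate
    return best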
if __name__ == "__main__":
import doctest
doctest.testmod()
| 85
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return BitConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_groups=self.num_groups,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as Bit does not use input_ids, inputs_embeds,
    attention_mask and seq_length.
    """

    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="Bit does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Bit does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Bit does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # Bit's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["preactivation", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip(reason="Bit does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
| 208
|
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """
    Count the tile totals t <= t_limit that can form a hollow square lamina in
    between 1 and n_limit distinct ways (Project Euler 174).
    """
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
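

# Brute-force sketch (ours, for spot-checking on small limits): enumerate the
# (outer_width, hole_width) pairs directly instead of bounding hole_width analytically.
def _solution_bruteforce(t_limit: int = 1_000, n_limit: int = 10) -> int:
    ways = defaultdict(int)
    for outer in range(3, (t_limit // 4) + 2):
        start = 2 if outer % 2 == 0 else 1
        for hole in range(start, outer - 1, 2):
            tiles = outer * outer - hole * hole
            if tiles <= t_limit:
                ways[tiles] += 1
    return sum(1 for n in ways.values() if 1 <= n <= n_limit)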
if __name__ == "__main__":
print(f'{solution() = }')
| 338
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class XLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained("xlm-mlm-en-2048")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_2 + [1]
| 172
|
"""simple docstring"""
def print_pascal_triangle(num_rows: int) -> None:
    """Print Pascal's triangle for the given number of rows."""
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")

    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )

    result: list[list[int]] = [[1]]

    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)

    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
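

# Cross-check sketch (ours, not part of the original): the row-by-row generator and
# the symmetry-exploiting one must produce identical triangles.
def _check_generators_agree(max_rows: int = 12) -> None:
    for rows in range(max_rows):
        assert generate_pascal_triangle(rows) == generate_pascal_triangle_optimized(rows)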
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 172
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 88
|
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """
    BigBirdForQuestionAnswering with CLS Head over the top for predicting category

    This way we can load its weights with FlaxBigBirdForQuestionAnswering
    """

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule


def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
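

# Tiny numeric check (ours, not part of the training script) of the one-hot
# cross-entropy above: for a uniform 2-class prediction the per-example loss is
# log(2) ~ 0.6931 regardless of the label.
def _demo_cross_entropy():
    logits = jnp.zeros((1, 2))  # uniform prediction over 2 classes
    labels = jnp.array([1])
    one_hot = (labels[..., None] == jnp.arange(2)[None]).astype("f4")
    loss = -jnp.sum(one_hot * jax.nn.log_softmax(logits, axis=-1), axis=-1)
    return loss  # array([0.6931...])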
@dataclass
class __snake_case :
a__ = "google/bigbird-roberta-base"
a__ = 3000
a__ = 1_0500
a__ = 128
a__ = 3
a__ = 1
a__ = 5
# tx_args
a__ = 3e-5
a__ = 0.0
a__ = 2_0000
a__ = 0.0095
a__ = "bigbird-roberta-natural-questions"
a__ = "training-expt"
a__ = "data/nq-training.jsonl"
a__ = "data/nq-validation.jsonl"
def lowerCamelCase_ ( self) -> Optional[Any]:
'''simple docstring'''
os.makedirs(self.base_dir , exist_ok=lowercase)
a__: str = os.path.join(self.base_dir , self.save_dir)
a__: List[str] = self.batch_size_per_device * jax.device_count()
@dataclass
class __snake_case :
a__ = 42
a__ = 4096 # no dynamic padding on TPUs
def __call__( self , lowercase) -> List[Any]:
'''simple docstring'''
a__: int = self.collate_fn(lowercase)
a__: Optional[int] = jax.tree_util.tree_map(lowercase , lowercase)
return batch
def lowerCamelCase_ ( self , lowercase) -> Dict:
'''simple docstring'''
a__ , a__: Dict = self.fetch_inputs(features['input_ids'])
a__: List[Any] = {
'input_ids': jnp.array(lowercase , dtype=jnp.intaa),
'attention_mask': jnp.array(lowercase , dtype=jnp.intaa),
'start_labels': jnp.array(features['start_token'] , dtype=jnp.intaa),
'end_labels': jnp.array(features['end_token'] , dtype=jnp.intaa),
'pooled_labels': jnp.array(features['category'] , dtype=jnp.intaa),
}
return batch
def lowerCamelCase_ ( self , lowercase) -> List[str]:
'''simple docstring'''
a__: List[Any] = [self._fetch_inputs(lowercase) for ids in input_ids]
return zip(*lowercase)
def lowerCamelCase_ ( self , lowercase) -> Dict:
'''simple docstring'''
a__: Union[str, Any] = [1 for _ in range(len(lowercase))]
while len(lowercase) < self.max_length:
input_ids.append(self.pad_id)
attention_mask.append(0)
return input_ids, attention_mask
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) ->List[Any]:
if seed is not None:
a__: int = dataset.shuffle(seed=_SCREAMING_SNAKE_CASE )
for i in range(len(_SCREAMING_SNAKE_CASE ) // batch_size ):
a__: Union[str, Any] = dataset[i * batch_size : (i + 1) * batch_size]
yield dict(_SCREAMING_SNAKE_CASE )
@partial(jax.pmap , axis_name='batch' )
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) ->Any:
def loss_fn(_SCREAMING_SNAKE_CASE ):
a__: str = model_inputs.pop('start_labels' )
a__: Dict = model_inputs.pop('end_labels' )
a__: Optional[int] = model_inputs.pop('pooled_labels' )
a__: Optional[Any] = state.apply_fn(**_SCREAMING_SNAKE_CASE , params=_SCREAMING_SNAKE_CASE , dropout_rng=_SCREAMING_SNAKE_CASE , train=_SCREAMING_SNAKE_CASE )
a__ , a__ , a__: Optional[int] = outputs
return state.loss_fn(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , )
a__ , a__: Union[str, Any] = jax.random.split(_SCREAMING_SNAKE_CASE )
a__: List[Any] = jax.value_and_grad(_SCREAMING_SNAKE_CASE )
a__ , a__: str = grad_fn(state.params )
a__: Optional[int] = jax.lax.pmean({'loss': loss} , axis_name='batch' )
a__: int = jax.lax.pmean(_SCREAMING_SNAKE_CASE , 'batch' )
a__: Union[str, Any] = state.apply_gradients(grads=_SCREAMING_SNAKE_CASE )
return state, metrics, new_drp_rng
@partial(jax.pmap , axis_name='batch' )
def __a ( _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) ->Optional[Any]:
a__: Optional[int] = model_inputs.pop('start_labels' )
a__: int = model_inputs.pop('end_labels' )
a__: Dict = model_inputs.pop('pooled_labels' )
a__: Union[str, Any] = state.apply_fn(**_SCREAMING_SNAKE_CASE , params=state.params , train=_SCREAMING_SNAKE_CASE )
a__ , a__ , a__: int = outputs
a__: Optional[int] = state.loss_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Tuple = jax.lax.pmean({'loss': loss} , axis_name='batch' )
return metrics
class __snake_case ( train_state.TrainState ):
a__ = struct.field(pytree_node=__lowerCAmelCase )
@dataclass
class __snake_case :
a__ = 42
a__ = 42
a__ = 42
a__ = 42
a__ = 42
a__ = 42
a__ = None
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase=None) -> Optional[int]:
'''simple docstring'''
a__: Dict = model.params
a__: Any = TrainState.create(
apply_fn=model.__call__ , params=lowercase , tx=lowercase , loss_fn=lowercase , )
if ckpt_dir is not None:
a__ , a__ , a__ , a__ , a__: Any = restore_checkpoint(lowercase , lowercase)
a__: Any = {
'lr': args.lr,
'init_lr': args.init_lr,
'warmup_steps': args.warmup_steps,
'num_train_steps': num_train_steps,
'weight_decay': args.weight_decay,
}
a__ , a__: str = build_tx(**lowercase)
a__: Optional[Any] = train_state.TrainState(
step=lowercase , apply_fn=model.__call__ , params=lowercase , tx=lowercase , opt_state=lowercase , )
a__: int = args
a__: Union[str, Any] = data_collator
a__: Any = lr
a__: Dict = params
a__: Tuple = jax_utils.replicate(lowercase)
return state
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> int:
'''simple docstring'''
a__: int = self.args
a__: str = len(lowercase) // args.batch_size
a__: Tuple = jax.random.PRNGKey(0)
a__: List[Any] = jax.random.split(lowercase , jax.device_count())
for epoch in range(args.max_epochs):
a__: str = jnp.array(0 , dtype=jnp.floataa)
a__: Tuple = get_batched_dataset(lowercase , args.batch_size , seed=lowercase)
a__: Optional[int] = 0
for batch in tqdm(lowercase , total=lowercase , desc=f'Running EPOCH-{epoch}'):
a__: List[str] = self.data_collator(lowercase)
a__ , a__ , a__: int = self.train_step_fn(lowercase , lowercase , **lowercase)
running_loss += jax_utils.unreplicate(metrics['loss'])
i += 1
if i % args.logging_steps == 0:
a__: List[Any] = jax_utils.unreplicate(state.step)
a__: Tuple = running_loss.item() / i
a__: Optional[Any] = self.scheduler_fn(state_step - 1)
a__: List[Any] = self.evaluate(lowercase , lowercase)
a__: List[str] = {
'step': state_step.item(),
'eval_loss': eval_loss.item(),
'tr_loss': tr_loss,
'lr': lr.item(),
}
tqdm.write(str(lowercase))
self.logger.log(lowercase , commit=lowercase)
if i % args.save_steps == 0:
self.save_checkpoint(args.save_dir + f'-e{epoch}-s{i}' , state=lowercase)
def lowerCamelCase_ ( self , lowercase , lowercase) -> List[Any]:
'''simple docstring'''
a__: Tuple = get_batched_dataset(lowercase , self.args.batch_size)
a__: Dict = len(lowercase) // self.args.batch_size
a__: Tuple = jnp.array(0 , dtype=jnp.floataa)
a__: List[Any] = 0
for batch in tqdm(lowercase , total=lowercase , desc='Evaluating ... '):
a__: str = self.data_collator(lowercase)
a__: List[str] = self.val_step_fn(lowercase , **lowercase)
running_loss += jax_utils.unreplicate(metrics['loss'])
i += 1
return running_loss / i
def lowerCamelCase_ ( self , lowercase , lowercase) -> Any:
'''simple docstring'''
a__: List[Any] = jax_utils.unreplicate(lowercase)
print(f'SAVING CHECKPOINT IN {save_dir}' , end=' ... ')
self.model_save_fn(lowercase , params=state.params)
with open(os.path.join(lowercase , 'opt_state.msgpack') , 'wb') as f:
f.write(to_bytes(state.opt_state))
joblib.dump(self.args , os.path.join(lowercase , 'args.joblib'))
joblib.dump(self.data_collator , os.path.join(lowercase , 'data_collator.joblib'))
with open(os.path.join(lowercase , 'training_state.json') , 'w') as f:
json.dump({'step': state.step.item()} , lowercase)
print('DONE')
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[Any]:
print(F'RESTORING CHECKPOINT FROM {save_dir}' , end=' ... ' )
with open(os.path.join(_SCREAMING_SNAKE_CASE , 'flax_model.msgpack' ) , 'rb' ) as f:
a__: int = from_bytes(state.params , f.read() )
with open(os.path.join(_SCREAMING_SNAKE_CASE , 'opt_state.msgpack' ) , 'rb' ) as f:
a__: Optional[Any] = from_bytes(state.opt_state , f.read() )
a__: Optional[Any] = joblib.load(os.path.join(_SCREAMING_SNAKE_CASE , 'args.joblib' ) )
a__: int = joblib.load(os.path.join(_SCREAMING_SNAKE_CASE , 'data_collator.joblib' ) )
with open(os.path.join(_SCREAMING_SNAKE_CASE , 'training_state.json' ) , 'r' ) as f:
a__: Any = json.load(_SCREAMING_SNAKE_CASE )
a__: Optional[Any] = training_state['step']
print('DONE' )
return params, opt_state, step, args, data_collator
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]:
a__: str = num_train_steps - warmup_steps
a__: str = optax.linear_schedule(init_value=_SCREAMING_SNAKE_CASE , end_value=_SCREAMING_SNAKE_CASE , transition_steps=_SCREAMING_SNAKE_CASE )
a__: List[Any] = optax.linear_schedule(init_value=_SCREAMING_SNAKE_CASE , end_value=1e-7 , transition_steps=_SCREAMING_SNAKE_CASE )
a__: int = optax.join_schedules(schedules=[warmup_fn, decay_fn] , boundaries=[warmup_steps] )
return lr
def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Tuple:
def weight_decay_mask(_SCREAMING_SNAKE_CASE ):
a__: List[Any] = traverse_util.flatten_dict(_SCREAMING_SNAKE_CASE )
a__: List[str] = {k: (v[-1] != 'bias' and v[-2:] != ('LayerNorm', 'scale')) for k, v in params.items()}
return traverse_util.unflatten_dict(_SCREAMING_SNAKE_CASE )
a__: List[str] = scheduler_fn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
a__: Any = optax.adamw(learning_rate=_SCREAMING_SNAKE_CASE , weight_decay=_SCREAMING_SNAKE_CASE , mask=_SCREAMING_SNAKE_CASE )
return tx, lr
| 290
| 0
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """
        This __init__ is there for legacy code. When removing deprecated args completely, the class can simply be
        deleted
        """
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )

        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self):
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self):
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self):
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
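

# The deprecated-flag flip from __init__, in isolation (ours, not part of the class):
# "no_inference=True" becomes "inference=False", so downstream code only ever sees
# the positive flag names.
def _demo_flag_flip():
    kwargs = {"no_inference": True, "eager_mode": False}
    for deprecated_arg in ["no_inference", "no_cuda"]:
        if deprecated_arg in kwargs:
            kwargs[deprecated_arg[3:]] = not kwargs.pop(deprecated_arg)
    return kwargs  # {"eager_mode": False, "inference": False}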
| 359
|
'''simple docstring'''
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(
    img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int
) -> np.ndarray:
    """
    Warp the image with the affine matrix that maps the points pt1 onto pt2.
    """
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))
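

# How the affine matrix can be recovered by hand (a sketch, ours; cv2.getAffineTransform
# is the production path): the 2x3 matrix M satisfies M @ [x, y, 1]^T = [x', y']^T for
# the three point pairs, so each output coordinate row is a 3x3 linear solve.
def _affine_from_points(src: np.ndarray, dst: np.ndarray) -> np.ndarray:
    ones = np.ones((3, 1), np.float32)
    a = np.hstack([src, ones])  # 3x3 system matrix built from the source points
    m_x = np.linalg.solve(a, dst[:, 0])  # first row of M
    m_y = np.linalg.solve(a, dst[:, 1])  # second row of M
    return np.vstack([m_x, m_y])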
if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 89
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
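

# A runnable miniature (ours, not part of this module) of the deferred-import idea
# behind _LazyModule: nothing is imported until the attribute is actually requested.
def _lazy_get(module_name: str, attr: str):
    import importlib

    return getattr(importlib.import_module(module_name), attr)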
| 292
|
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
A = self.speech_processor.feature_extractor(
__UpperCamelCase , return_tensors="pt" , sampling_rate=__UpperCamelCase ).input_features.to(self.device )
A = self.speech_model.generate(__UpperCamelCase , max_length=48_00_00 )
A = self.speech_processor.tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , normalize=__UpperCamelCase )[
0
]
if isinstance(__UpperCamelCase , __UpperCamelCase ):
A = 1
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
A = len(__UpperCamelCase )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__UpperCamelCase )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__UpperCamelCase , __UpperCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(__UpperCamelCase )}." )
# get prompt text embeddings
A = self.tokenizer(
__UpperCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
A = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
A = text_input_ids[:, : self.tokenizer.model_max_length]
A = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
A, A, A = text_embeddings.shape
A = text_embeddings.repeat(1 , __UpperCamelCase , 1 )
A = text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
A = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
A = 42
if negative_prompt is None:
A = [""] * batch_size
elif type(__UpperCamelCase ) is not type(__UpperCamelCase ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCamelCase )} !="
f" {type(__UpperCamelCase )}." )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
A = [negative_prompt]
elif batch_size != len(__UpperCamelCase ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(__UpperCamelCase )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
A = negative_prompt
A = text_input_ids.shape[-1]
A = self.tokenizer(
__UpperCamelCase , padding="max_length" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="pt" , )
A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
A = uncond_embeddings.shape[1]
A = uncond_embeddings.repeat(1 , __UpperCamelCase , 1 )
A = uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
A = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
A = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device="cpu" , dtype=__UpperCamelCase ).to(
self.device )
else:
A = torch.randn(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
A = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
A = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
A = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A = {}
if accepts_eta:
A = eta
for i, t in enumerate(self.progress_bar(__UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A = self.scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
# predict the noise residual
A = self.unet(__UpperCamelCase , __UpperCamelCase , encoder_hidden_states=__UpperCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
A, A = noise_pred.chunk(2 )
A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
A = 1 / 0.18_215 * latents
A = self.vae.decode(__UpperCamelCase ).sample
A = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
A = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__UpperCamelCase , nsfw_content_detected=__UpperCamelCase )
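

# Classifier-free guidance in isolation (ours, not part of the pipeline): the final
# noise estimate extrapolates from the unconditional prediction toward the
# text-conditioned one; guidance_scale == 1 reproduces the conditional prediction.
def _demo_cfg(guidance_scale: float = 7.5):
    noise_pred_uncond = torch.zeros(1, 4, 8, 8)
    noise_pred_text = torch.ones(1, 4, 8, 8)
    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
    return noise_pred  # equals guidance_scale everywhere in this toy setup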
| 292
| 1
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
    get_dataset_config_info,
    get_dataset_config_names,
    get_dataset_infos,
    get_dataset_split_names,
    inspect_dataset,
    inspect_metric,
    list_datasets,
    list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
    NamedSplit,
    NamedSplitAll,
    Split,
    SplitBase,
    SplitDict,
    SplitGenerator,
    SplitInfo,
    SubSplitInfo,
    percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
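# Illustrative sketch of the version-gating pattern above, assuming only the
# `packaging` library; the version strings are made up for demonstration.
from packaging import version

assert version.parse("3.7.0") >= version.parse("3.7")
assert version.parse("8.0.0").major >= 8
# parse() compares numerically, unlike plain string comparison:
assert version.parse("3.10") > version.parse("3.9")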
| 366
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = 'biogpt'

    def __init__(self, vocab_size=42384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, scale_embedding=True, use_cache=True, layerdrop=0.0, activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
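# Hedged usage sketch: the class above mirrors transformers' BioGptConfig, so a
# release that ships it can be used the same way; overridden fields replace the
# defaults shown above and the rest fall back to them.
from transformers import BioGptConfig

config = BioGptConfig(hidden_size=512, num_hidden_layers=6)
print(config.model_type)  # "biogpt"
print(config.vocab_size)  # 42384 (default)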
| 232
| 0
|
import string


def atbash_slow(sequence: str) -> str:
    """simple docstring"""
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    """simple docstring"""
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence)


def benchmark() -> None:
    """simple docstring"""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")


if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(f"{example} encrypted in atbash: {atbash(example)}")
    benchmark()
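# Worked example of the chr() arithmetic above: 155 == ord('A') + ord('Z'), so
# chr(155 - ord(c)) mirrors the uppercase alphabet; 219 == ord('a') + ord('z')
# does the same for lowercase. A self-contained check:
assert 155 == ord("A") + ord("Z") and 219 == ord("a") + ord("z")
assert "".join(chr(155 - ord(c)) for c in "ABC") == "ZYX"
assert "".join(chr(219 - ord(c)) for c in "abc") == "zyx"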
| 147
|
'''simple docstring'''


def binomial_coefficient(n: int, r: int) -> int:
    '''simple docstring'''
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
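# Sanity-check sketch for the in-place Pascal-row update above: running j from
# min(i, r) down to 1 lets each row overwrite the previous one right to left.
# nCr is a hypothetical helper name; math.comb needs Python >= 3.8.
import math

def nCr(n, r):
    c = [0] * (r + 1)
    c[0] = 1
    for i in range(1, n + 1):
        for j in range(min(i, r), 0, -1):
            c[j] += c[j - 1]
    return c[r]

assert nCr(10, 5) == math.comb(10, 5) == 252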
| 85
| 0
|
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_tf,
    require_torch,
    require_vision,
    slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
    from PIL import Image
else:

    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline('visual-question-answering', model='hf-internal-testing/tiny-vilt-random-vqa')
        examples = [
            {
                'image': Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png'),
                'question': 'How many cats are there?',
            },
            {
                'image': './tests/fixtures/tests_samples/COCO/000000039769.png',
                'question': 'How many cats are there?',
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{'score': ANY(float), 'answer': ANY(str)}],
                [{'score': ANY(float), 'answer': ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline('visual-question-answering', model='hf-internal-testing/tiny-vilt-random-vqa')
        image = './tests/fixtures/tests_samples/COCO/000000039769.png'
        question = 'How many cats are there?'

        outputs = vqa_pipeline(image=image, question='How many cats are there?', top_k=2)
        self.assertEqual(
            outputs, [{'score': ANY(float), 'answer': ANY(str)}, {'score': ANY(float), 'answer': ANY(str)}])

        outputs = vqa_pipeline({'image': image, 'question': question}, top_k=2)
        self.assertEqual(
            outputs, [{'score': ANY(float), 'answer': ANY(str)}, {'score': ANY(float), 'answer': ANY(str)}])

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline('visual-question-answering', model='dandelin/vilt-b32-finetuned-vqa')
        image = './tests/fixtures/tests_samples/COCO/000000039769.png'
        question = 'How many cats are there?'

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}])

        outputs = vqa_pipeline({'image': image, 'question': question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}])

        outputs = vqa_pipeline(
            [{'image': image, 'question': question}, {'image': image, 'question': question}], top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [[{'score': 0.8799, 'answer': '2'}, {'score': 0.296, 'answer': '1'}]] * 2,
        )

    @require_tf
    @unittest.skip('Visual question answering not implemented in TF')
    def test_small_model_tf(self):
        pass
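# Hedged usage sketch of the pipeline exercised above; the model id and image
# path are the same ones the tests use, and downloadable weights are assumed.
from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
preds = vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png",
            question="How many cats are there?", top_k=2)
print(preds)  # e.g. [{'score': 0.88, 'answer': '2'}, {'score': 0.30, 'answer': '1'}]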
| 99
|
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        DebertaVaForMaskedLM,
        DebertaVaForMultipleChoice,
        DebertaVaForQuestionAnswering,
        DebertaVaForSequenceClassification,
        DebertaVaForTokenClassification,
        DebertaVaModel,
    )
    from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaVaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type, )

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict


@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason='Model not available yet')
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained('microsoft/deberta-v2-xlarge')

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f'{output[:, 1:4, 1:4]}')
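# Sketch of the slice-comparison idiom used in the integration test above:
# check a small window of the output against hard-coded values with a loose
# tolerance. Dummy zero tensors stand in for real model output here.
import torch

output = torch.zeros(1, 11, 8)
expected_slice = torch.zeros(1, 3, 3)
assert torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)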
| 99
| 1
|
"""simple docstring"""
import operator as op
def solve(post_fix: list) -> int:
    '''simple docstring'''
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        '^': op.pow,
        '*': op.mul,
        '/': div,
        '+': op.add,
        '-': op.sub,
    }  # operators & their respective operation

    # print table header
    print('Symbol'.center(8), 'Action'.center(12), 'Stack', sep=' | ')
    print('-' * (30 + len(post_fix)))
    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ('push(' + x + ')').ljust(12), ','.join(stack), sep=' | ')
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8), ('pop(' + b + ')').ljust(12), ','.join(stack), sep=' | ')
            a = stack.pop()  # pop stack
            # output in tabular format
            print(''.rjust(8), ('pop(' + a + ')').ljust(12), ','.join(stack), sep=' | ')
            stack.append(
                str(opr[x](int(a), int(b))))  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ('push(' + a + x + b + ')').ljust(12), ','.join(stack), sep=' | ')
    return int(stack[0])


if __name__ == "__main__":
    Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(" ")
    print("\n\tResult = ", solve(Postfix))
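# Worked example of the evaluation above, without the table printing: for
# "2 3 4 * +" the stack evolves [2] -> [2,3] -> [2,3,4] -> [2,12] -> [14].
# eval_postfix is a hypothetical helper name for this self-contained check.
import operator as op

def eval_postfix(tokens):
    ops = {"+": op.add, "-": op.sub, "*": op.mul, "^": op.pow,
           "/": lambda x, y: int(x / y)}
    stack = []
    for tok in tokens:
        if tok.isdigit():
            stack.append(int(tok))
        else:
            b, a = stack.pop(), stack.pop()
            stack.append(ops[tok](a, b))
    return stack[0]

assert eval_postfix("2 3 4 * +".split()) == 14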
| 172
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm"] = [
        "XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMForMultipleChoice",
        "XLMForQuestionAnswering",
        "XLMForQuestionAnsweringSimple",
        "XLMForSequenceClassification",
        "XLMForTokenClassification",
        "XLMModel",
        "XLMPreTrainedModel",
        "XLMWithLMHeadModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm"] = [
        "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMForMultipleChoice",
        "TFXLMForQuestionAnsweringSimple",
        "TFXLMForSequenceClassification",
        "TFXLMForTokenClassification",
        "TFXLMMainLayer",
        "TFXLMModel",
        "TFXLMPreTrainedModel",
        "TFXLMWithLMHeadModel",
    ]
if TYPE_CHECKING:
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
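# Minimal sketch of the lazy-import idea behind _LazyModule (whose internals
# are not shown here), using PEP 562 module-level __getattr__: the heavy
# submodule is only imported on first attribute access. Assumes this code
# lives in a package's __init__.py so the relative import resolves.
import importlib

_structure = {"tokenization_xlm": ["XLMTokenizer"]}

def __getattr__(name):
    for module_name, exported in _structure.items():
        if name in exported:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")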
| 172
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
    from .processing_speech_to_text import SpeechaTextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import SpeechaTextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeechaTextForConditionalGeneration,
            TFSpeechaTextModel,
            TFSpeechaTextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechaTextForConditionalGeneration,
            SpeechaTextModel,
            SpeechaTextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 360
|
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
UpperCAmelCase = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def pytest_collection_modifyitems(config, items):
    """simple docstring"""
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    """simple docstring"""
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    """simple docstring"""
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    """simple docstring"""
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    """simple docstring"""
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    """simple docstring"""
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
| 187
| 0
|
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        'en': 'Machine learning is great, isn\'t it?',
        'ru': 'Машинное обучение - это здорово, не так ли?',
        'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        'wmt16-en-de-dist-12-1': [28.3, 27.52],
        'wmt16-en-de-dist-6-1': [27.4, 27.11],
        'wmt16-en-de-12-1': [26.9, 25.75],
    }
    pair = f'{src_lang}-{tgt_lang}'

    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
    title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
    author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
    year={{2020}},
    eprint={{2006.10369}},
    archivePrefix={{arXiv}},
    primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, 'README.md')
    print(f'Generating {path}')
    with open(path, 'w', encoding='utf-8') as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
    write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
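# Minimal sketch of the same template-and-write flow with hypothetical names,
# targeting a temp directory instead of the repo tree:
import tempfile
from pathlib import Path

card_dir = Path(tempfile.mkdtemp()) / "allenai" / "wmt16-en-de-12-1"
card_dir.mkdir(parents=True, exist_ok=True)
card = f"# FSMT\nPorted fairseq wmt16 model: {card_dir.name}\n"
(card_dir / "README.md").write_text(card, encoding="utf-8")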
| 96
|
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode',
            result.stderr.decode().replace('\n', ''),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
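# Standalone sketch of the socket monkey-patching trick the tests above inject
# into a child process: once socket.socket raises, any new network connection
# fails, emulating offline mode. The URL is illustrative.
import socket

def offline_socket(*args, **kwargs):
    raise RuntimeError("network disabled for this process")

socket.socket = offline_socket  # every new connection attempt now fails

try:
    import urllib.request
    urllib.request.urlopen("http://example.com", timeout=1)
except Exception as err:
    print(f"blocked as expected: {err}")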
| 89
| 0
|
"""simple docstring"""
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPTaConfig,
    GPTaLMHeadModel,
    GPTaTokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0


def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False


def main():
    parser = argparse.ArgumentParser(description='Training')
    parser.add_argument('--force', action='store_true', help='Overwrite dump_path if it already exists.')
    parser.add_argument(
        '--dump_path', type=str, required=True, help='The output directory (log, checkpoints, parameters, etc.)')
    parser.add_argument(
        '--data_file', type=str, required=True, help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.')
    parser.add_argument(
        '--student_type', type=str, choices=['distilbert', 'roberta', 'gpt2'], required=True, help='The student type (DistilBERT, RoBERTa).')
    parser.add_argument('--student_config', type=str, required=True, help='Path to the student configuration.')
    parser.add_argument(
        '--student_pretrained_weights', default=None, type=str, help='Load student initialization checkpoint.')
    parser.add_argument(
        '--teacher_type', choices=['bert', 'roberta', 'gpt2'], required=True, help='Teacher type (BERT, RoBERTa).')
    parser.add_argument('--teacher_name', type=str, required=True, help='The teacher model.')
    parser.add_argument('--temperature', default=2.0, type=float, help='Temperature for the softmax temperature.')
    parser.add_argument(
        '--alpha_ce', default=0.5, type=float, help='Linear weight for the distillation loss. Must be >=0.')
    parser.add_argument(
        '--alpha_mlm', default=0.0, type=float, help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.')
    parser.add_argument('--alpha_clm', default=0.5, type=float, help='Linear weight for the CLM loss. Must be >=0.')
    parser.add_argument('--alpha_mse', default=0.0, type=float, help='Linear weight of the MSE loss. Must be >=0.')
    parser.add_argument(
        '--alpha_cos', default=0.0, type=float, help='Linear weight of the cosine embedding loss. Must be >=0.')
    parser.add_argument(
        '--mlm', action='store_true', help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.')
    parser.add_argument(
        '--mlm_mask_prop', default=0.15, type=float, help='Proportion of tokens for which we need to make a prediction.')
    parser.add_argument('--word_mask', default=0.8, type=float, help='Proportion of tokens to mask out.')
    parser.add_argument('--word_keep', default=0.1, type=float, help='Proportion of tokens to keep.')
    parser.add_argument('--word_rand', default=0.1, type=float, help='Proportion of tokens to randomly replace.')
    parser.add_argument(
        '--mlm_smoothing', default=0.7, type=float, help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).')
    parser.add_argument('--token_counts', type=str, help='The token counts in the data_file for MLM.')
    parser.add_argument(
        '--restrict_ce_to_mask', action='store_true', help='If true, compute the distillation loss only the [MLM] prediction distribution.')
    parser.add_argument(
        '--freeze_pos_embs', action='store_true', help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.')
    parser.add_argument(
        '--freeze_token_type_embds', action='store_true', help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.')
    parser.add_argument('--n_epoch', type=int, default=3, help='Number of pass on the whole dataset.')
    parser.add_argument('--batch_size', type=int, default=5, help='Batch size (for each process).')
    parser.add_argument(
        '--group_by_size', action='store_false', help='If true, group sequences that have similar length into the same batch. Default is true.')
    parser.add_argument(
        '--gradient_accumulation_steps', type=int, default=50, help='Gradient accumulation for larger training batches.')
    parser.add_argument('--warmup_prop', default=0.05, type=float, help='Linear warmup proportion.')
    parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
    parser.add_argument('--learning_rate', default=5e-4, type=float, help='The initial learning rate for Adam.')
    parser.add_argument('--adam_epsilon', default=1e-6, type=float, help='Epsilon for Adam optimizer.')
    parser.add_argument('--max_grad_norm', default=5.0, type=float, help='Max gradient norm.')
    parser.add_argument('--initializer_range', default=0.02, type=float, help='Random initialization range.')
    parser.add_argument(
        '--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit')
    parser.add_argument(
        '--fp16_opt_level', type=str, default='O1', help=(
            'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
            'See details at https://nvidia.github.io/apex/amp.html'
        ))
    parser.add_argument('--n_gpu', type=int, default=1, help='Number of GPUs in the node.')
    parser.add_argument('--local_rank', type=int, default=-1, help='Distributed training - Local rank')
    parser.add_argument('--seed', type=int, default=56, help='Random seed')
    parser.add_argument('--log_interval', type=int, default=500, help='Tensorboard logging interval.')
    parser.add_argument('--checkpoint_interval', type=int, default=4000, help='Checkpoint interval.')
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite it. "
                    "Use `--force` if you want to overwrite it.")
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, 'parameters.json'), 'w') as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, 'rb') as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, 'rb') as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info('Data loader created.')

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info('Student loaded.')

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher)
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
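# Worked sketch of the token-weighting step above: rarer tokens get a larger
# masking weight via counts ** -mlm_smoothing, and special tokens are zeroed
# so they are never selected. The counts below are made up for illustration.
import numpy as np
import torch

counts = np.array([0, 1000, 10, 10000, 5])  # index 0: a special token
token_probs = np.maximum(counts, 1) ** -0.7  # mlm_smoothing = 0.7
token_probs[0] = 0.0  # do not predict special tokens
token_probs = torch.from_numpy(token_probs)
print(token_probs)  # the rare token (count 5) now outweighs the frequent one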
| 182
|
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f'Total count for various states are: {single_qubit_measure(1, 1)}')
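# Usage sketch: with no gates applied the qubit stays in |0>, so all 1000
# shots should land in the '0' bin. Assumes a pre-1.0 qiskit with Aer
# installed, matching the qiskit.Aer / qiskit.execute API used above.
counts = single_qubit_measure(1, 1)
assert sum(counts.values()) == 1000
print(counts)  # expected: {'0': 1000}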
| 182
| 1
|