| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case ( self : Dict )-> Any:
lowerCamelCase__ : Any =TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''' )
lowerCamelCase__ : Optional[int] ={
"input_ids": tf.convert_to_tensor([[0, 2646, 1_0269, 83, 9_9942, 2]], dtype=tf.intaa ), # "My dog is cute"
"attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.intaa ),
}
lowerCamelCase__ : Optional[int] =model(__lowerCamelCase )["last_hidden_state"]
lowerCamelCase__ : int =tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape, __lowerCamelCase )
# compare the actual values for a slice.
lowerCamelCase__ : Tuple =tf.convert_to_tensor(
[
[
[0.0_681_762, 0.10_894_451, 0.06_772_504],
[-0.06_423_668, 0.02_366_615, 0.04_329_344],
[-0.06_057_295, 0.09_974_135, -0.00_070_584],
]
], dtype=tf.floataa, )
self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-4 ) )
[code_codestyle: 238]
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Rotate/warp the image with the affine transform mapping pt1 onto pt2."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
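# A minimal sanity check (editor's addition, not part of the original script):
# the 2x3 matrix returned by cv2.getAffineTransform should map each of the
# three source points exactly onto its destination point.
import cv2
import numpy as np

src = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
dst = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
matrix = cv2.getAffineTransform(src, dst)  # shape (2, 3): [A | b]
ones = np.ones((3, 1), np.float32)
mapped = np.hstack([src, ones]) @ matrix.T  # apply [A|b] to homogeneous points
assert np.allclose(mapped, dst, atol=1e-4)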
[style_context_codestyle: 149 | label: 0]
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]

        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
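# A quick usage sketch (editor's addition): zero-shot audio classification
# scores arbitrary candidate labels against a raw waveform. The 1-second sine
# wave and the 48 kHz sample count below are placeholder assumptions, not
# values taken from the test above.
import numpy as np
from transformers import pipeline

classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
waveform = np.sin(2 * np.pi * 440 * np.linspace(0, 1, 48_000)).astype(np.float32)
print(classifier(waveform, candidate_labels=["a pure tone", "a barking dog"]))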
[code_codestyle: 42]
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
>>> repo = "openai/shap-e-img2img"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"
>>> image = load_image(image_url).convert("RGB")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], "corgi_3d.gif")
```
"""
@dataclass
class ShapEPipelineOutput(BaseOutput):
    """Output class for the Shap-E image-to-3D pipeline: a batch of rendered frames."""

    images: Union[PIL.Image.Image, np.ndarray]


class ShapEImg2ImgPipeline(DiffusionPipeline):
    def __init__(
        self,
        prior: PriorTransformer,
        image_encoder: CLIPVisionModel,
        image_processor: CLIPImageProcessor,
        scheduler: HeunDiscreteScheduler,
        renderer: ShapERenderer,
    ):
        super().__init__()
        self.register_modules(
            prior=prior,
            image_encoder=image_encoder,
            image_processor=image_processor,
            scheduler=scheduler,
            renderer=renderer,
        )

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.image_encoder, "_hf_hook"):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance):
        if isinstance(image, list) and isinstance(image[0], torch.Tensor):
            image = torch.cat(image, axis=0) if image[0].ndim == 4 else torch.stack(image, axis=0)

        if not isinstance(image, torch.Tensor):
            image = self.image_processor(image, return_tensors="pt").pixel_values[0].unsqueeze(0)

        image = image.to(dtype=self.image_encoder.dtype, device=device)

        image_embeds = self.image_encoder(image)["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds])

        return image_embeds

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_images_per_prompt: int = 1,
        num_inference_steps: int = 25,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        guidance_scale: float = 4.0,
        frame_size: int = 64,
        output_type: Optional[str] = "pil",  # pil, np, latent
        return_dict: bool = True,
    ):
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        elif isinstance(image, list) and isinstance(image[0], (torch.Tensor, PIL.Image.Image)):
            batch_size = len(image)
        else:
            raise ValueError(
                f"`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image)}"
            )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance)

        # prior
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim)

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            noise_pred = self.prior(
                scaled_model_input,
                timestep=t,
                proj_embedding=image_embeds,
            ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2
            )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred,
                timestep=t,
                sample=latents,
            ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents)

        images = []
        for i, latent in enumerate(latents):
            image = self.renderer.decode(
                latent[None, :],
                device,
                size=frame_size,
                ray_batch_size=4096,
                n_coarse_samples=64,
                n_fine_samples=128,
            )
            images.append(image)

        images = torch.stack(images)

        if output_type not in ["np", "pil"]:
            raise ValueError(f"Only the output types `pil` and `np` are supported not output_type={output_type}")

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images)
[style_context_codestyle: 42 | label: 1]
"""Convert a Pix2Struct T5x checkpoint to the HuggingFace format."""
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params


def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")

            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict


def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        processor.image_processor.max_patches = 4096

    processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use the large model configuration.")
    parser.add_argument("--is_vqa", action="store_true", help="Convert a VQA fine-tuned checkpoint.")

    args = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa
    )
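# Example invocation (editor's addition; the script filename and both paths are
# placeholders, not files shipped with the repository):
#
#   python convert_pix2struct_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path ./pix2struct-base \
#       --use_large   # only for the large variant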
[code_codestyle: 162]
"""Tests for the Shap-E text-to-3D pipeline."""
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference


class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs

    def test_shap_e(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True

        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2

        inputs = self.get_dummy_inputs(torch_device)

        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]

        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)

        images = pipe(
            "a shark",
            generator=generator,
            guidance_scale=15.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)

        assert_mean_pixel_difference(images, expected_image)
[style_context_codestyle: 162 | label: 1]
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
[code_codestyle: 358]
"""An LRU cache built on a doubly linked list, plus a memoizing decorator."""
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Double Linked List Node built specifically for LRU Cache."""

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Double Linked List built specifically for LRU Cache."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Add the given node at the end of the list (right before rear)."""
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Remove and return the given node; return None if it is not linked."""
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU Cache to store a given capacity of data."""

    # class variable mapping decorated functions to their cache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Return the value for the input key and update the Double Linked List."""
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Set the value for the input key and update the Double Linked List."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1

        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator version of LRU Cache."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
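# A short usage sketch (editor's addition, not part of the original module):
# the classmethod decorator memoizes a one-argument function through a shared
# LRUCache instance, and the attached cache_info() exposes its statistics.
@LRUCache.decorator(size=100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)

print(fib(30))           # 832040, computed with memoized subcalls
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)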
[style_context_codestyle: 346 | label: 0]
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
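# How the lazy module behaves at runtime (editor's note): replacing the module
# in sys.modules with a _LazyModule means attribute access on the package
# triggers the real import, so entries in _import_structure are only resolved
# when first touched, e.g.:
#   from transformers.models.deberta import DebertaModel  # imports modeling_deberta lazily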
[code_codestyle: 175]
import fire

from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Save a randomly initialized (untrained) seq2seq model and its tokenizer."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
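# Example invocation via python-fire (editor's addition; the script filename,
# output directory, and config override are placeholders):
#   python save_randomly_initialized_model.py t5-small ./t5-small-random --d_model=128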
[style_context_codestyle: 175 | label: 1]
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
    "feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
    "processing_wav2vec2": ["Wav2Vec2Processor"],
    "tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_wav2vec2"] = [
        "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Wav2Vec2ForAudioFrameClassification",
        "Wav2Vec2ForCTC",
        "Wav2Vec2ForMaskedLM",
        "Wav2Vec2ForPreTraining",
        "Wav2Vec2ForSequenceClassification",
        "Wav2Vec2ForXVector",
        "Wav2Vec2Model",
        "Wav2Vec2PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
        "TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFWav2Vec2ForCTC",
        "TFWav2Vec2Model",
        "TFWav2Vec2PreTrainedModel",
        "TFWav2Vec2ForSequenceClassification",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
        "FlaxWav2Vec2ForCTC",
        "FlaxWav2Vec2ForPreTraining",
        "FlaxWav2Vec2Model",
        "FlaxWav2Vec2PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
[code_codestyle: 145]
import unittest

from huggingface_hub import hf_hub_download

from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_decord,
    require_tf,
    require_torch,
    require_torch_or_tf,
    require_vision,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
[style_context_codestyle: 145 | label: 1]
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest

import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed


logger = logging.getLogger(__name__)


def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module):
    "Simple model to do y=mx+b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b


class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)

            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))

    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
[code_codestyle: 156]
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
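# A quick usage sketch (editor's addition): instantiating the config and
# reading the attributes derived in __init__.
config = MaskFormerSwinConfig(embed_dim=96, depths=[2, 2, 6, 2])
print(config.hidden_size)  # 96 * 2**3 == 768, channel dim after the last stage
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']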
[style_context_codestyle: 103 | label: 0]
import math


def fx(x: float, a: float) -> float:
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    return 2 * x


def get_initial_point(a: float) -> float:
    start = 2.0

    while start <= a:
        start = math.pow(start, 2)

    return start


def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 1e-14) -> float:
    """
    Square root approximated using Newton's method.
    https://en.wikipedia.org/wiki/Newton%27s_method
    """
    if a < 0:
        raise ValueError("math domain error")

    value = get_initial_point(a)

    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value

    return value


if __name__ == "__main__":
    from doctest import testmod

    testmod()
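# A small worked example (editor's addition): Newton's update
# x_{n+1} = x_n - f(x_n)/f'(x_n) with f(x) = x^2 - a converges quadratically.
# For a = 2 the initial point is 4.0 and the iterates are
# 2.25, 1.5694..., 1.42189..., 1.41423..., ~1.41421356 (= sqrt(2)).
print(square_root_iterative(2))   # ~1.4142135623730951
print(square_root_iterative(49))  # ~7.0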
[code_codestyle: 208]
from __future__ import annotations


def depth_first_search(graph: dict, start: str) -> set[str]:
    """Iterative depth-first search over a graph given as an adjacency dict."""
    explored, stack = {start}, [start]

    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored


G = {
    "A": ["B", "C", "D"],
    "B": ["A", "D", "E"],
    "C": ["A", "F"],
    "D": ["B", "D"],
    "E": ["B", "F"],
    "F": ["C", "E", "G"],
    "G": ["F"],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(depth_first_search(G, "A"))
[style_context_codestyle: 208 | label: 1]
"""Convert a Megatron-LM GPT2 checkpoint to the HuggingFace Transformers format."""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
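# A tiny sanity check of the permutation above (editor's addition, not in the
# original script): with checkpoint_version >= 2.0 the saved layout is
# [num_heads * num_splits * hidden_size, :], and the fix regroups it to
# [num_splits * num_heads * hidden_size, :] while keeping the flat shape.
if __debug__:
    _heads, _splits, _hid = 2, 3, 4
    _p = torch.arange(_heads * _splits * _hid).unsqueeze(-1).float()
    _fixed = fix_query_key_value_ordering(_p, 2.0, _splits, _heads, _hid)
    assert _fixed.shape == _p.shape
    # the first hidden-size block of the fixed tensor comes from head 0, split 0
    assert torch.equal(_fixed[:_hid, 0], _p[:_hid, 0])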
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A ) -> Union[str, Any]:
# The converted output model.
_snake_case = {}
# old versions did not store training args
_snake_case = input_state_dict.get('args' , __A )
if ds_args is not None:
# do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
# from pprint import pprint
# pprint(vars(ds_args))
_snake_case = ds_args.padded_vocab_size
_snake_case = ds_args.max_position_embeddings
_snake_case = ds_args.hidden_size
_snake_case = ds_args.num_layers
_snake_case = ds_args.num_attention_heads
_snake_case = ds_args.ffn_hidden_size
# pprint(config)
# The number of heads.
_snake_case = config.n_head
# The hidden_size per head.
_snake_case = config.n_embd // config.n_head
# Megatron-LM checkpoint version
if "checkpoint_version" in input_state_dict.keys():
_snake_case = input_state_dict['checkpoint_version']
else:
_snake_case = 0.0
# The model.
_snake_case = input_state_dict['model']
# The language model.
_snake_case = model['language_model']
# The embeddings.
_snake_case = lm['embedding']
# The word embeddings.
_snake_case = embeddings['word_embeddings']['weight']
# Truncate the embedding table to vocab_size rows.
_snake_case = word_embeddings[: config.vocab_size, :]
_snake_case = word_embeddings
# The position embeddings.
_snake_case = embeddings['position_embeddings']['weight']
# Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
_snake_case = pos_embeddings.size(0 )
if n_positions != config.n_positions:
raise ValueError(
F'pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don\'t match' )
# Store the position embeddings.
_snake_case = pos_embeddings
# The transformer.
_snake_case = lm['transformer'] if 'transformer' in lm.keys() else lm['encoder']
# The regex to extract layer names.
_snake_case = re.compile(r'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
# The simple map of names for "automated" rules.
_snake_case = {
'attention.dense': '.attn.c_proj.',
'self_attention.dense': '.attn.c_proj.',
'mlp.dense_h_to_4h': '.mlp.c_fc.',
'mlp.dense_4h_to_h': '.mlp.c_proj.',
}
# Extract the layers.
for key, val in transformer.items():
# Match the name.
        m = layer_re.match(key )
        # Stop if that's not a layer
        if m is None:
            break
        # The index of the layer.
        layer_idx = int(m.group(1 ) )
        # The name of the operation.
        op_name = m.group(2 )
        # Is it a weight or a bias?
        weight_or_bias = m.group(3 )
        # The name of the layer.
        layer_name = f'transformer.h.{layer_idx}'
        # For layernorm(s), simply store the layer norm.
        if op_name.endswith('layernorm' ):
            ln_name = 'ln_1' if op_name.startswith('input' ) else 'ln_2'
            output_state_dict[layer_name + '.' + ln_name + '.' + weight_or_bias] = val
        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.floataa ) ).view(
                1, 1, n_positions, n_positions )
            output_state_dict[layer_name + '.attn.bias'] = causal_mask
            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.floataa )
            output_state_dict[layer_name + '.attn.masked_bias'] = masked_bias
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head )
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1 ).contiguous()
            # Store.
            output_state_dict[layer_name + '.attn.c_attn.weight'] = out_val
        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head )
            # Store. No change of shape.
            output_state_dict[layer_name + '.attn.c_attn.bias'] = out_val
        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + 'weight'] = val.transpose(0, 1 )
        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + 'bias'] = val
    # DEBUG.
    assert config.n_layer == layer_idx + 1
    # The final layernorm.
    output_state_dict['transformer.ln_f.weight'] = transformer['final_layernorm.weight']
    output_state_dict['transformer.ln_f.bias'] = transformer['final_layernorm.bias']
    # For LM head, transformers wants the matrix to weight embeddings.
    output_state_dict['lm_head.weight'] = word_embeddings
    # It should be done!
    return output_state_dict
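# Hedged illustration of the layer-name regex used above; the example key is
# made up, but follows the Megatron naming scheme the regex targets. The helper
# name `_demo_layer_regex` is not part of the conversion script.
def _demo_layer_regex():
    import re as _re

    demo_re = _re.compile(r'layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)' )
    m = demo_re.match('layers.7.mlp.dense_h_to_4h.weight' )
    assert m is not None
    assert m.groups() == ('7', 'mlp.dense_h_to_4h', 'weight')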
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument('--print-checkpoint-structure', action='store_true' )
    parser.add_argument(
        'path_to_checkpoint', type=str, help='Path to the checkpoint file (.zip archive or direct .pt file)', )
    parser.add_argument(
        '--config_file', default='', type=str, help='An optional config json file describing the pre-trained model.', )
    args = parser.parse_args()
    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint )
    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f'Extracting PyTorch state dictionary from {args.path_to_checkpoint}' )
    if args.path_to_checkpoint.endswith('.zip' ):
        with zipfile.ZipFile(args.path_to_checkpoint, 'r' ) as checkpoint:
            with checkpoint.open('release/mp_rank_00/model_optim_rng.pt' ) as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location='cpu' )
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location='cpu' )
    ds_args = input_state_dict.get('args', None )
    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = 'gelu_fast'
            elif ds_args.openai_gelu:
                activation_function = 'gelu_new'
            else:
                activation_function = 'gelu'
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = 'gelu_new'
        # Spell out all parameters in case the defaults change.
        config = GPTaConfig(
            vocab_size=50_257,
            n_positions=1_024,
            n_embd=1_024,
            n_layer=24,
            n_head=16,
            n_inner=4_096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type='cls_index',
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50_256,
            eos_token_id=50_256,
        )
    else:
        config = GPTaConfig.from_json_file(args.config_file )

    config.architectures = ['GPT2LMHeadModel']
    # Convert.
    print('Converting' )
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config )
# Print the structure of converted state dict.
if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict )
    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = 'gpt2'
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f'Unrecognized tokenizer_type {tokenizer_type}' )
    else:
        tokenizer_model_name = 'gpt2'
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name )
    tokenizer_class = type(tokenizer ).__name__
    config.tokenizer_class = tokenizer_class
    # Store the config to file.
    print('Saving config' )
    config.save_pretrained(basename )
    # Save tokenizer based on args
    print(f'Adding {tokenizer_class} tokenizer files' )
    tokenizer.save_pretrained(basename )
    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, 'pytorch_model.bin' )
    print(f'Saving checkpoint to "{output_checkpoint_file}"' )
    torch.save(output_state_dict, output_checkpoint_file )
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 42
|
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowercase : List[str] = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
hf_model.apply_weight_norm()
    hf_model.conv_pre.weight_g.data = checkpoint['input_conv.weight_g']
    hf_model.conv_pre.weight_v.data = checkpoint['input_conv.weight_v']
    hf_model.conv_pre.bias.data = checkpoint['input_conv.bias']

    for i in range(len(config.upsample_rates ) ):
        hf_model.upsampler[i].weight_g.data = checkpoint[f'upsamples.{i}.1.weight_g']
        hf_model.upsampler[i].weight_v.data = checkpoint[f'upsamples.{i}.1.weight_v']
        hf_model.upsampler[i].bias.data = checkpoint[f'upsamples.{i}.1.bias']

    for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
        for j in range(len(config.resblock_dilation_sizes ) ):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_g']
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f'blocks.{i}.convs1.{j}.1.weight_v']
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f'blocks.{i}.convs1.{j}.1.bias']
            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_g']
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f'blocks.{i}.convs2.{j}.1.weight_v']
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f'blocks.{i}.convs2.{j}.1.bias']

    hf_model.conv_post.weight_g.data = checkpoint['output_conv.1.weight_g']
    hf_model.conv_post.weight_v.data = checkpoint['output_conv.1.weight_v']
    hf_model.conv_post.bias.data = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
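# Why the checkpoint carries weight_g/weight_v pairs (sketch; the Conv1d sizes
# below are arbitrary): under torch's classic weight_norm utility the effective
# weight is w = g * v / ||v||, so both halves must be loaded while the norm is
# applied and are fused afterwards by remove_weight_norm() above.
def _demo_weight_norm_params():
    import torch.nn as nn
    from torch.nn.utils import weight_norm

    conv = weight_norm(nn.Conv1d(4, 4, kernel_size=3 ) )
    # weight_norm registers the two underlying parameters checked here.
    assert hasattr(conv, 'weight_g' ) and hasattr(conv, 'weight_v' )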
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None, ):
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path )
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config )

    orig_checkpoint = torch.load(checkpoint_path )
    load_weights(orig_checkpoint['model']['generator'], model, config )

    stats = np.load(stats_path )
    mean = stats[0].reshape(-1 )
    scale = stats[1].reshape(-1 )
    model.mean = torch.from_numpy(mean ).float()
    model.scale = torch.from_numpy(scale ).float()

    model.save_pretrained(pytorch_dump_folder_path )

    if repo_id:
        print('Pushing to the hub...' )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
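    # Example invocation (the script name and paths are placeholders, not taken
    # from the repository):
    #   python convert_hifigan.py --checkpoint_path generator.pt \
    #       --stats_path stats.npy --pytorch_dump_folder_path ./hifigan_out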
| 42
| 1
|
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> Tuple:
_a : List[str] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , '''width_multiplier''' ) )
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=6_4 , _a=2 , _a=3 , _a="swish" , _a=3 , _a=3_2 , _a=0.1 , _a=0.02 , _a=True , _a=True , _a=1_0 , _a=None , _a=0.25 , _a=0.0 , _a=0.0 , ) -> Optional[int]:
_a : int = parent
_a : str = batch_size
_a : Dict = image_size
_a : str = patch_size
_a : Optional[Any] = num_channels
_a : str = make_divisible(5_1_2 * width_multiplier , divisor=8 )
_a : Any = hidden_act
_a : Dict = conv_kernel_size
_a : List[Any] = output_stride
_a : Any = classifier_dropout_prob
_a : Optional[int] = use_labels
_a : List[str] = is_training
_a : List[Any] = num_labels
_a : Any = initializer_range
_a : Union[str, Any] = scope
_a : Dict = width_multiplier
_a : int = ffn_dropout
_a : int = attn_dropout
def __lowercase ( self ) -> int:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None

        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )

        config = self.get_config()
return config, pixel_values, labels, pixel_labels
def __lowercase ( self ) -> List[Any]:
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __lowercase ( self , _a , _a , _a , _a ) -> Any:
_a : str = MobileViTVaModel(config=_a )
model.to(_a )
model.eval()
_a : List[str] = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __lowercase ( self , _a , _a , _a , _a ) -> str:
_a : Union[str, Any] = self.num_labels
_a : int = MobileViTVaForImageClassification(_a )
model.to(_a )
model.eval()
_a : List[Any] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self , _a , _a , _a , _a ) -> Union[str, Any]:
_a : Optional[Any] = self.num_labels
_a : List[str] = MobileViTVaForSemanticSegmentation(_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_a : Union[str, Any] = model(_a , labels=_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __lowercase ( self ) -> int:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCAmelCase__ : List[str] = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : int = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : str = False
def __lowercase ( self ) -> List[str]:
        self.model_tester = MobileViTVaModelTester(self )
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False )
def __lowercase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def __lowercase ( self ) -> Tuple:
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def __lowercase ( self ) -> Optional[int]:
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def __lowercase ( self ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def __lowercase ( self ) -> Union[str, Any]:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> Any:
pass
def __lowercase ( self ) -> List[Any]:
_a , _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Dict = model_class(_a )
_a : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Optional[int] = [*signature.parameters.keys()]
_a : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __lowercase ( self ) -> Union[str, Any]:
_a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> Any:
        def check_hidden_states_output(inputs_dict, config, model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states ), expected_num_stages )

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2 )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class )

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class )
def __lowercase ( self ) -> Dict:
_a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def __lowercase ( self ) -> Any:
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
@slow
def __lowercase ( self ) -> Union[str, Any]:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = MobileViTVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_a : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
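# Standalone sketch of the spatial halving asserted in the hidden-states test
# above; the default sizes mirror the tester defaults but are otherwise
# illustrative, and the helper name is invented.
def _demo_spatial_halving(image_size=64, num_stages=5):
    # stage i has side image_size // 2**(i + 1): 32, 16, 8, 4, 2 for the defaults
    return [image_size // (2 ** (i + 1)) for i in range(num_stages )]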
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> int:
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def __lowercase ( self ) -> Union[str, Any]:
_a : Tuple = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
_a )
_a : str = self.default_image_processor
_a : List[Any] = prepare_img()
_a : str = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : Any = model(**_a )
# verify the logits
_a : List[str] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _a )
_a : Dict = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@slow
def __lowercase ( self ) -> Tuple:
_a : int = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_a : List[str] = model.to(_a )
_a : Any = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_a : Any = prepare_img()
_a : Any = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : List[Any] = model(**_a )
_a : Dict = outputs.logits
# verify the logits
_a : Tuple = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape , _a )
_a : Any = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=_a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _a , atol=1e-4 ) )
@slow
def __lowercase ( self ) -> Tuple:
_a : Optional[int] = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_a : Optional[Any] = model.to(_a )
_a : str = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
_a : List[str] = prepare_img()
_a : Dict = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : int = model(**_a )
_a : List[Any] = outputs.logits.detach().cpu()
_a : List[Any] = image_processor.post_process_semantic_segmentation(outputs=_a , target_sizes=[(5_0, 6_0)] )
_a : Tuple = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape , _a )
_a : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=_a )
_a : Optional[int] = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape , _a )
| 15
|
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum ):
    """simple docstring"""
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(__lowercase )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
    XL_PREFIX = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "
    def __init__(self, *args, **kwargs ):
        super().__init__(*args, **kwargs )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params )
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework )
            generate_kwargs["prefix_length"] = prefix_inputs['input_ids'].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']" )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs )
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
            if return_tensors is not None:
                raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False )
            if len(stop_sequence_ids ) > 1:
                warnings.warn(
                    'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
                    ' the stop sequence will be used as the stop sequence string in the interim.' )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs ):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({'add_space_before_punct_symbol': True} )

        return super()._parse_and_tokenize(*args, **kwargs )

    def __call__(self, text_inputs, **kwargs ):
        return super().__call__(text_inputs, **kwargs )
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs ):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs['input_ids'].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs['max_new_tokens']
            else:
                new_tokens = generate_kwargs.get('max_length', self.model.config.max_length ) - cur_len
                if new_tokens < 0:
                    raise ValueError('We cannot infer how many new tokens are expected' )
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
                        ' models max length' )

                inputs['input_ids'] = inputs['input_ids'][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs['attention_mask'] = inputs['attention_mask'][:, -keep_length:]
return inputs
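    # Worked example of the `hole` arithmetic above (numbers invented): with
    # cur_len=1000 prompt tokens, max_new_tokens=50 and model_max_length=1024,
    # keep_length = 1024 - 50 = 974, so only the last 974 prompt tokens survive
    # and the total 974 + 50 fits the model's window.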
    def _forward(self, model_inputs, **generate_kwargs ):
        input_ids = model_inputs['input_ids']
        attention_mask = model_inputs.get('attention_mask', None )
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop('prompt_text' )

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop('prefix_length', 0 )
        if prefix_length > 0:
            has_max_new_tokens = 'max_new_tokens' in generate_kwargs or (
                'generation_config' in generate_kwargs
                and generate_kwargs['generation_config'].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get('max_length' ) or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = 'min_new_tokens' in generate_kwargs or (
                'generation_config' in generate_kwargs
                and generate_kwargs['generation_config'].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs )
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
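    # Shape walkthrough for the reshape above (values invented): with in_b=2
    # inputs and num_return_sequences=3, generate() returns 6 flat rows; the
    # reshape regroups them as (2, 3, seq_len) so postprocess() can pair each
    # prompt with its candidate generations.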
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True ):
        generated_sequence = model_outputs['generated_sequence'][0]
        input_ids = model_outputs['input_ids']
        prompt_text = model_outputs['prompt_text']
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {'generated_token_ids': sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces, ) )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]
                record = {'generated_text': all_text}
            records.append(record )
return records
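# Standalone sketch of the prompt-stripping step in postprocess() above; the
# strings are invented. FULL_TEXT re-prepends the raw prompt, NEW_TEXT keeps
# only the freshly generated suffix.
def _demo_prompt_stripping(prompt='Hello', decoded='Hello world'):
    prompt_length = len(prompt )
    return decoded[prompt_length:]  # -> ' world'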
| 15
| 1
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
__lowerCamelCase : str = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> from PIL import Image
>>> import torch
>>> from diffusers import DiffusionPipeline
>>> from diffusers.utils import export_to_gif, load_image
>>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")
>>> repo = \"openai/shap-e-img2img\"
>>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)
>>> pipe = pipe.to(device)
>>> guidance_scale = 3.0
>>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\"
>>> image = load_image(image_url).convert(\"RGB\")
>>> images = pipe(
... image,
... guidance_scale=guidance_scale,
... num_inference_steps=64,
... frame_size=256,
... ).images
>>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\")
```
"""
@dataclass
class A__ ( __snake_case ):
_UpperCAmelCase :Union[PIL.Image.Image, np.ndarray]
class A__ ( __snake_case ):
    def __init__(self, prior, image_encoder, image_processor, scheduler, renderer, ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            prior=prior, image_encoder=image_encoder, image_processor=image_processor, scheduler=scheduler, renderer=renderer, )
    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler ):
        '''simple docstring'''
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
            latents = latents.to(device )

        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0 ):
        '''simple docstring'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`" )

        device = torch.device(f'cuda:{gpu_id}' )

        models = [self.image_encoder, self.prior]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device )
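    # Note on prepare_latents above: freshly drawn latents are unit-variance
    # Gaussian noise, and multiplying by the scheduler's init_noise_sigma puts
    # them at the noise level the first denoising step expects (for Karras-style
    # schedulers that sigma can be much larger than 1).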
@property
    def _execution_device(self ):
        '''simple docstring'''
        if self.device != torch.device("meta" ) or not hasattr(self.image_encoder, "_hf_hook" ):
            return self.device
        for module in self.image_encoder.modules():
            if (
                hasattr(module, "_hf_hook" )
                and hasattr(module._hf_hook, "execution_device" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    def _encode_image(self, image, device, num_images_per_prompt, do_classifier_free_guidance, ):
        '''simple docstring'''
        if isinstance(image, list ) and isinstance(image[0], torch.Tensor ):
            image = torch.cat(image, axis=0 ) if image[0].ndim == 4 else torch.stack(image, axis=0 )

        if not isinstance(image, torch.Tensor ):
            image = self.image_processor(image, return_tensors="pt" ).pixel_values[0].unsqueeze(0 )

        image = image.to(dtype=self.image_encoder.dtype, device=device )

        image_embeds = self.image_encoder(image )["last_hidden_state"]
        image_embeds = image_embeds[:, 1:, :].contiguous()  # batch_size, dim, 256

        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0 )

        if do_classifier_free_guidance:
            negative_image_embeds = torch.zeros_like(image_embeds )

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            image_embeds = torch.cat([negative_image_embeds, image_embeds] )

        return image_embeds
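    # Batching sketch for the classifier-free guidance above (shapes assumed):
    # with a conditional batch of (B, N, D) embeddings, concatenating the zeroed
    # negatives in front yields (2B, N, D), so a single prior forward pass
    # produces both the unconditional and the conditional prediction.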
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__(self, image, num_images_per_prompt=1, num_inference_steps=25, generator=None, latents=None, guidance_scale=4.0, frame_size=64, output_type="pil", return_dict=True, ):
        '''simple docstring'''
        if isinstance(image, PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image, torch.Tensor ):
            batch_size = image.shape[0]
        elif isinstance(image, list ) and isinstance(image[0], (torch.Tensor, PIL.Image.Image) ):
            batch_size = len(image )
        else:
            raise ValueError(
                f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(image )}' )

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0
        image_embeds = self._encode_image(image, device, num_images_per_prompt, do_classifier_free_guidance )

        # prior

        self.scheduler.set_timesteps(num_inference_steps, device=device )
        timesteps = self.scheduler.timesteps

        num_embeddings = self.prior.config.num_embeddings
        embedding_dim = self.prior.config.embedding_dim

        latents = self.prepare_latents(
            (batch_size, num_embeddings * embedding_dim), image_embeds.dtype, device, generator, latents, self.scheduler, )

        # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
        latents = latents.reshape(latents.shape[0], num_embeddings, embedding_dim )

        for i, t in enumerate(self.progress_bar(timesteps ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            scaled_model_input = self.scheduler.scale_model_input(latent_model_input, t )

            noise_pred = self.prior(
                scaled_model_input, timestep=t, proj_embedding=image_embeds, ).predicted_image_embedding

            # remove the variance
            noise_pred, _ = noise_pred.split(
                scaled_model_input.shape[2], dim=2 )  # batch_size, num_embeddings, embedding_dim

            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred = noise_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)

            latents = self.scheduler.step(
                noise_pred, timestep=t, sample=latents, ).prev_sample

        if output_type == "latent":
            return ShapEPipelineOutput(images=latents )

        images = []
        for i, latent in enumerate(latents ):
            images.append(
                self.renderer.decode(
                    latent[None, :], device, size=frame_size, ray_batch_size=4096, n_coarse_samples=64, n_fine_samples=128, ) )

        images = torch.stack(images )

        if output_type not in ["np", "pil"]:
            raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' )

        images = images.cpu().numpy()

        if output_type == "pil":
            images = [self.numpy_to_pil(image ) for image in images]

        # Offload last model to CPU
        if hasattr(self, "final_offload_hook" ) and self.final_offload_hook is not None:
            self.final_offload_hook.offload()

        if not return_dict:
            return (images,)

        return ShapEPipelineOutput(images=images )
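# The guidance update used in the denoising loop above, in isolation (toy
# tensors, invented scale): pred = uncond + scale * (cond - uncond).
def _demo_classifier_free_guidance(scale=4.0):
    uncond = torch.zeros(3 )
    cond = torch.ones(3 )
    return uncond + scale * (cond - uncond)  # -> tensor([4., 4., 4.])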
| 52
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
def __init__( self : Union[str, Any] ):
"""simple docstring"""
self.test()
    def test(self ):
        """simple docstring"""
        counter = 0
        completed = False

        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance ):
                raise Exception(
                    'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )

            stepped, completed, reset = self.update(advance )
            counter += 1

            if counter > 1_00_00:
                raise Exception('update() does not fulfill the constraint.' )

        if self.remaining() != 0:
            raise Exception('Custom Constraint is not defined correctly.' )
    @abstractmethod
    def advance(self ):
        """simple docstring"""
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )

    @abstractmethod
    def does_advance(self, token_id ):
        """simple docstring"""
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )

    @abstractmethod
    def update(self, token_id ):
        """simple docstring"""
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )

    @abstractmethod
    def reset(self ):
        """simple docstring"""
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )

    @abstractmethod
    def remaining(self ):
        """simple docstring"""
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )

    @abstractmethod
    def copy(self, stateful=False ):
        """simple docstring"""
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
    def __init__(self, token_ids ):
        """simple docstring"""
        super().__init__()

        if not isinstance(token_ids, list ) or len(token_ids ) == 0:
            raise ValueError(f'`token_ids` has to be a non-empty list, but is {token_ids}.' )
        if any((not isinstance(token_id, int ) or token_id < 0) for token_id in token_ids ):
            raise ValueError(f'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.' )

        self.token_ids = token_ids

        self.seqlen = len(self.token_ids )
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self ):
        """simple docstring"""
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id ):
        """simple docstring"""
        if not isinstance(token_id, int ):
            raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}' )

        if self.completed:
            return False

        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id ):
        """simple docstring"""
        if not isinstance(token_id, int ):
            raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(token_id )}' )

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id ):
            self.fulfilled_idx += 1
            stepped = True

            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self ):
        """simple docstring"""
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self ):
        """simple docstring"""
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False ):
        """simple docstring"""
        new_constraint = PhrasalConstraint(self.token_ids )

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
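# Toy walkthrough of the stepping behaviour implemented by update()/reset()
# above; the token ids and the stream are invented for illustration, and the
# helper is a standalone mimic, not part of the constraint classes.
def _demo_phrasal_steps(token_ids=(5, 9, 2), stream=(5, 7, 5, 9, 2)):
    fulfilled_idx = -1
    for token in stream:
        if fulfilled_idx + 1 < len(token_ids ) and token == token_ids[fulfilled_idx + 1]:
            fulfilled_idx += 1  # stepped
        else:
            fulfilled_idx = -1  # reset on mismatch
        if fulfilled_idx == len(token_ids ) - 1:
            return True  # completed
    return False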
class lowerCAmelCase_ :
'''simple docstring'''
    def __init__(self, nested_token_ids, no_subsets=True ):
        """simple docstring"""
        self.max_height = max([len(one ) for one in nested_token_ids] )

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids ):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids ):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f' {nested_token_ids}.' )

        self.trie = root
    def next_tokens(self, current_seq ):
        """simple docstring"""
        start = self.trie

        for current_token in current_seq:
            start = start[current_token]

        next_tokens = list(start.keys() )
        return next_tokens

    def reached_leaf(self, current_seq ):
        """simple docstring"""
        next_tokens = self.next_tokens(current_seq )
        return len(next_tokens ) == 0

    def count_leaves(self, root ):
        """simple docstring"""
        next_nodes = list(root.values() )
        if len(next_nodes ) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn ) for nn in next_nodes] )

    def has_subsets(self, trie, nested_token_ids ):
        """simple docstring"""
        leaf_count = self.count_leaves(trie )
        return len(nested_token_ids ) != leaf_count
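# Shape of the trie built above, on invented ids: nested_token_ids
# [[1, 2, 3], [1, 4]] become {1: {2: {3: {}}, 4: {}}}, so next_tokens([1])
# offers both continuations. The helper is illustrative only.
def _demo_trie_layout():
    root = {}
    for seq in ([1, 2, 3], [1, 4]):
        level = root
        for token in seq:
            level = level.setdefault(token, {} )
    assert sorted(root[1].keys() ) == [2, 4]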
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
    def __init__(self, nested_token_ids ):
        """simple docstring"""
        super().__init__()

        if not isinstance(nested_token_ids, list ) or len(nested_token_ids ) == 0:
            raise ValueError(f'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.' )
        if any(not isinstance(token_ids, list ) for token_ids in nested_token_ids ):
            raise ValueError(f'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.' )
        if any(
            any((not isinstance(token_id, int ) or token_id < 0) for token_id in token_ids )
            for token_ids in nested_token_ids ):
            raise ValueError(
                f'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.' )

        self.trie = DisjunctiveTrie(nested_token_ids )
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False
    def advance(self ):
        """simple docstring"""
        token_list = self.trie.next_tokens(self.current_seq )

        if len(token_list ) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id ):
        """simple docstring"""
        if not isinstance(token_id, int ):
            raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}' )

        next_tokens = self.trie.next_tokens(self.current_seq )

        return token_id in next_tokens

    def update(self, token_id ):
        """simple docstring"""
        if not isinstance(token_id, int ):
            raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id )}' )

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id ):
            self.current_seq.append(token_id )
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq )
        self.completed = completed

        return stepped, completed, reset

    def reset(self ):
        """simple docstring"""
        self.completed = False
        self.current_seq = []

    def remaining(self ):
        """simple docstring"""
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq )

    def copy(self, stateful=False ):
        """simple docstring"""
        new_constraint = DisjunctiveConstraint(self.token_ids )

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
class lowerCAmelCase_ :
'''simple docstring'''
    def __init__(self, constraints ):
        """simple docstring"""
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints] )
        self.n_constraints = len(constraints )
        self.completed = False

        self.init_state()

    def init_state(self ):
        """simple docstring"""
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False ) for constraint in self.constraints]

    def get_bank(self ):
        """simple docstring"""
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints ) * self.max_seqlen) + add

    def advance(self ):
        """simple docstring"""
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int ):
                    token_list.append(advance )
                elif isinstance(advance, list ):
                    token_list.extend(advance )
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int ):
                token_list.append(advance )
            elif isinstance(advance, list ):
                token_list.extend(advance )

        if len(token_list ) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids ):
        """simple docstring"""
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token )

                # the entire list of constraints are fulfilled
                if self.completed:
                    break
    def add(self, token_id ):
        """simple docstring"""
        if not isinstance(token_id, int ):
            raise ValueError(f'`token_id` should be an `int`, but is `{token_id}`.' )

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make an incremental progress to current
            # job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id )
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".

                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.

                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False ) )
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.

                self.complete_constraints.append(self.inprogress_constraint )
                self.inprogress_constraint = None

                if len(self.pending_constraints ) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?

            for cidx, pending_constraint in enumerate(self.pending_constraints ):
                if pending_constraint.does_advance(token_id ):
                    stepped, complete, reset = pending_constraint.update(token_id )

                    if not stepped:
                        raise Exception(
                            '`constraint.update(token_id)` is not yielding incremental progress, '
                            'even though `constraint.does_advance(token_id)` is true.' )

                    if complete:
                        self.complete_constraints.append(pending_constraint )
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".

                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.

                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped
    def copy(self, stateful=True ):
        """simple docstring"""
        new_state = ConstraintListState(self.constraints )  # we never actually mutate self.constraints objects
        # throughout this process, so they remain in their initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True ) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True )
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
| 346
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_lxmert''': ['''LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LxmertConfig'''],
'''tokenization_lxmert''': ['''LxmertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ['''LxmertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
'''LxmertEncoder''',
'''LxmertForPreTraining''',
'''LxmertForQuestionAnswering''',
'''LxmertModel''',
'''LxmertPreTrainedModel''',
'''LxmertVisualFeatureEncoder''',
'''LxmertXLayer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
'''TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLxmertForPreTraining''',
'''TFLxmertMainLayer''',
'''TFLxmertModel''',
'''TFLxmertPreTrainedModel''',
'''TFLxmertVisualFeatureEncoder''',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 363
|
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __a :
"""simple docstring"""
def __init__( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any]=3 , lowercase_ : Tuple=32 , lowercase_ : List[str]=3 , lowercase_ : str=10 , lowercase_ : Tuple=[10, 20, 30, 40] , lowercase_ : Tuple=[1, 1, 2, 1] , lowercase_ : Union[str, Any]=True , lowercase_ : Dict=True , lowercase_ : Optional[Any]="relu" , lowercase_ : Optional[int]=3 , lowercase_ : List[Any]=None , ):
UpperCamelCase__ : Tuple =parent
UpperCamelCase__ : str =batch_size
UpperCamelCase__ : List[Any] =image_size
UpperCamelCase__ : Union[str, Any] =num_channels
UpperCamelCase__ : Dict =embeddings_size
UpperCamelCase__ : Tuple =hidden_sizes
UpperCamelCase__ : List[Any] =depths
UpperCamelCase__ : List[Any] =is_training
UpperCamelCase__ : Union[str, Any] =use_labels
UpperCamelCase__ : Optional[int] =hidden_act
UpperCamelCase__ : Dict =num_labels
UpperCamelCase__ : List[str] =scope
UpperCamelCase__ : Optional[Any] =len(lowercase_ )
def _lowerCAmelCase ( self : Any ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels )

        config = self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self : Tuple ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def _lowerCAmelCase ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : List[str] ):
UpperCamelCase__ : Any =RegNetModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase__ : Union[str, Any] =model(lowercase_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _lowerCAmelCase ( self : List[Any] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Tuple ):
UpperCamelCase__ : List[str] =self.num_labels
UpperCamelCase__ : Optional[Any] =RegNetForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase__ : Optional[int] =model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : Any ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class __a ( snake_case__, snake_case__, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ = (
{'feature-extraction': RegNetModel, 'image-classification': RegNetForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def _lowerCAmelCase ( self : Union[str, Any] ):
        self.model_tester = RegNetModelTester(self )
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False )
def _lowerCAmelCase ( self : List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCAmelCase ( self : Dict ):
return
@unittest.skip(reason='''RegNet does not use inputs_embeds''' )
def _lowerCAmelCase ( self : List[Any] ):
pass
@unittest.skip(reason='''RegNet does not support input and output embeddings''' )
def _lowerCAmelCase ( self : Dict ):
pass
def _lowerCAmelCase ( self : int ):
UpperCamelCase__ , UpperCamelCase__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[Any] =model_class(lowercase_ )
UpperCamelCase__ : List[str] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : List[str] =[*signature.parameters.keys()]
UpperCamelCase__ : str =['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_ )
def _lowerCAmelCase ( self : List[str] ):
UpperCamelCase__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def _lowerCAmelCase ( self : List[str] ):
UpperCamelCase__ , UpperCamelCase__ : str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[Any] =model_class(config=lowercase_ )
for name, module in model.named_modules():
if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def _lowerCAmelCase ( self : List[Any] ):
        def check_hidden_states_output(inputs_dict, config, model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class ) )

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ), expected_num_stages + 1 )

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class )

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class )
def _lowerCAmelCase ( self : Optional[int] ):
UpperCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def _lowerCAmelCase ( self : Any ):
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Union[str, Any] =RegNetModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCamelCase__ : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowerCAmelCase ( self : int ):
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _lowerCAmelCase ( self : Any ):
UpperCamelCase__ : List[Any] =RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ )
UpperCamelCase__ : Union[str, Any] =self.default_image_processor
UpperCamelCase__ : Any =prepare_img()
UpperCamelCase__ : Optional[Any] =image_processor(images=lowercase_ , return_tensors='''pt''' ).to(lowercase_ )
# forward pass
with torch.no_grad():
UpperCamelCase__ : Dict =model(**lowercase_ )
# verify the logits
UpperCamelCase__ : Union[str, Any] =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
UpperCamelCase__ : Union[str, Any] =torch.tensor([-0.4_1_8_0, -1.5_0_5_1, -3.4_8_3_6] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4 ) )
| 157
| 0
|
'''simple docstring'''
def __UpperCAmelCase ( number: int ):
if number < 0:
raise ValueError("number must not be negative" )
return number & (number - 1) == 0
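# A minimal sanity check for the bit trick above: n & (n - 1) clears the
# lowest set bit, so the result is 0 exactly when n has at most one bit
# set. Note the edge case that 0 also passes, since 0 & -1 == 0.
assert __UpperCAmelCase(16) is True  # 0b10000 & 0b01111 == 0
assert __UpperCAmelCase(12) is False  # 0b1100 & 0b1011 == 0b1000 != 0
assert __UpperCAmelCase(0) is True  # edge case noted above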
if __name__ == "__main__":
import doctest
doctest.testmod()
| 145
|
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__a = logging.get_logger(__name__)
__a = {
'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Tuple = '''instructblip_vision_model'''
def __init__( self : str , lowerCAmelCase__ : Dict=1_4_0_8 , lowerCAmelCase__ : int=6_1_4_4 , lowerCAmelCase__ : List[str]=3_9 , lowerCAmelCase__ : int=1_6 , lowerCAmelCase__ : Tuple=2_2_4 , lowerCAmelCase__ : Tuple=1_4 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : Union[str, Any]=1e-6 , lowerCAmelCase__ : Tuple=0.0 , lowerCAmelCase__ : Optional[int]=1e-10 , lowerCAmelCase__ : Dict=True , **lowerCAmelCase__ : str , ) -> Optional[int]:
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
_UpperCAmelCase : List[str] = hidden_size
_UpperCAmelCase : List[str] = intermediate_size
_UpperCAmelCase : Optional[int] = num_hidden_layers
_UpperCAmelCase : Union[str, Any] = num_attention_heads
_UpperCAmelCase : str = patch_size
_UpperCAmelCase : List[Any] = image_size
_UpperCAmelCase : Union[str, Any] = initializer_range
_UpperCAmelCase : int = attention_dropout
_UpperCAmelCase : Optional[int] = layer_norm_eps
_UpperCAmelCase : Any = hidden_act
_UpperCAmelCase : Tuple = qkv_bias
@classmethod
def _lowerCAmelCase ( cls : Optional[int] , lowerCAmelCase__ : Union[str, os.PathLike] , **lowerCAmelCase__ : Any ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowerCAmelCase__ )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
_UpperCAmelCase : int = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] = '''instructblip_qformer'''
def __init__( self : List[str] , lowerCAmelCase__ : Union[str, Any]=3_0_5_2_2 , lowerCAmelCase__ : Dict=7_6_8 , lowerCAmelCase__ : Tuple=1_2 , lowerCAmelCase__ : Optional[Any]=1_2 , lowerCAmelCase__ : Union[str, Any]=3_0_7_2 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : List[Any]=0.1 , lowerCAmelCase__ : str=0.1 , lowerCAmelCase__ : Dict=5_1_2 , lowerCAmelCase__ : Tuple=0.02 , lowerCAmelCase__ : Optional[int]=1e-12 , lowerCAmelCase__ : Dict=0 , lowerCAmelCase__ : Union[str, Any]="absolute" , lowerCAmelCase__ : Optional[int]=2 , lowerCAmelCase__ : int=1_4_0_8 , **lowerCAmelCase__ : List[str] , ) -> Dict:
"""simple docstring"""
super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = vocab_size
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : Dict = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : List[Any] = intermediate_size
_UpperCAmelCase : Tuple = hidden_dropout_prob
_UpperCAmelCase : List[Any] = attention_probs_dropout_prob
_UpperCAmelCase : Any = max_position_embeddings
_UpperCAmelCase : int = initializer_range
_UpperCAmelCase : List[str] = layer_norm_eps
_UpperCAmelCase : Tuple = position_embedding_type
_UpperCAmelCase : Tuple = cross_attention_frequency
_UpperCAmelCase : Any = encoder_hidden_size
@classmethod
def _lowerCAmelCase ( cls : Dict , lowerCAmelCase__ : Union[str, os.PathLike] , **lowerCAmelCase__ : Optional[int] ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(lowerCAmelCase__ )
_UpperCAmelCase , _UpperCAmelCase : List[str] = cls.get_config_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
_UpperCAmelCase : Tuple = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(lowerCAmelCase__ , **lowerCAmelCase__ )
class A__ ( UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ : List[str] = '''instructblip'''
UpperCamelCase_ : Dict = True
def __init__( self : Tuple , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Union[str, Any]=3_2 , **lowerCAmelCase__ : Dict ) -> Any:
"""simple docstring"""
super().__init__(**lowerCAmelCase__ )
if vision_config is None:
_UpperCAmelCase : List[str] = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
_UpperCAmelCase : Tuple = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
_UpperCAmelCase : int = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
_UpperCAmelCase : List[str] = InstructBlipVisionConfig(**lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = InstructBlipQFormerConfig(**lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = text_config["model_type"] if "model_type" in text_config else "opt"
_UpperCAmelCase : Optional[int] = CONFIG_MAPPING[text_model_type](**lowerCAmelCase__ )
_UpperCAmelCase : Dict = self.text_config.tie_word_embeddings
_UpperCAmelCase : List[Any] = self.text_config.is_encoder_decoder
_UpperCAmelCase : List[str] = num_query_tokens
_UpperCAmelCase : int = self.vision_config.hidden_size
_UpperCAmelCase : Tuple = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_UpperCAmelCase : int = 1.0
_UpperCAmelCase : Dict = 0.02
@classmethod
def _lowerCAmelCase ( cls : Dict , lowerCAmelCase__ : InstructBlipVisionConfig , lowerCAmelCase__ : InstructBlipQFormerConfig , lowerCAmelCase__ : PretrainedConfig , **lowerCAmelCase__ : Union[str, Any] , ) -> Tuple:
"""simple docstring"""
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **lowerCAmelCase__ , )
def _lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Any = copy.deepcopy(self.__dict__ )
_UpperCAmelCase : Optional[int] = self.vision_config.to_dict()
_UpperCAmelCase : List[Any] = self.qformer_config.to_dict()
_UpperCAmelCase : List[Any] = self.text_config.to_dict()
_UpperCAmelCase : Dict = self.__class__.model_type
return output
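# A minimal composition sketch for the three configs above, assuming the
# canonical transformers class names (InstructBlipVisionConfig,
# InstructBlipQFormerConfig, InstructBlipConfig), which this dump
# obfuscates; the tiny sizes are illustrative, not real checkpoint values.
def _instructblip_config_sketch():
    vision = InstructBlipVisionConfig(hidden_size=32, num_hidden_layers=2)
    qformer = InstructBlipQFormerConfig(hidden_size=32, encoder_hidden_size=32)
    text = {"model_type": "opt", "hidden_size": 32}  # resolved via CONFIG_MAPPING
    config = InstructBlipConfig(
        vision_config=vision.to_dict(),
        qformer_config=qformer.to_dict(),
        text_config=text,
        num_query_tokens=4,
    )
    # to_dict() re-serializes the nested sub-configs, mirroring the method above
    return config.to_dict()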
| 145
| 1
|
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A :
def __init__(self , lowerCAmelCase , lowerCAmelCase=1_3 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=9_9 , lowerCAmelCase=3_2 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase=3_7 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=5_1_2 , lowerCAmelCase=1_6 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ):
__lowercase= parent
__lowercase= 1_3
__lowercase= 7
__lowercase= True
__lowercase= True
__lowercase= True
__lowercase= True
__lowercase= 9_9
__lowercase= 3_8_4
__lowercase= 2
__lowercase= 4
__lowercase= 3_7
__lowercase= 'gelu'
__lowercase= 0.1
__lowercase= 0.1
__lowercase= 5_1_2
__lowercase= 1_6
__lowercase= 2
__lowercase= 0.02
__lowercase= 3
__lowercase= 4
__lowercase= 1_2_8
__lowercase= 2
__lowercase= 9
__lowercase= 1
__lowercase= None
def _A (self ):
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase= None
if self.use_input_mask:
__lowercase= random_attention_mask([self.batch_size, self.seq_length] )
__lowercase= None
if self.use_token_type_ids:
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase= None
__lowercase= None
__lowercase= None
if self.use_labels:
__lowercase= ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase= ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase= ids_tensor([self.batch_size] , self.num_choices )
__lowercase= ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowerCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= TFConvBertModel(config=lowerCAmelCase )
__lowercase= {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowercase= [input_ids, input_mask]
__lowercase= model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= TFConvBertForMaskedLM(config=lowerCAmelCase )
__lowercase= {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= TFConvBertForSequenceClassification(config=lowerCAmelCase )
__lowercase= {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_choices
__lowercase= TFConvBertForMultipleChoice(config=lowerCAmelCase )
__lowercase= tf.tile(tf.expand_dims(lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase= tf.tile(tf.expand_dims(lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase= tf.tile(tf.expand_dims(lowerCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase= {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= self.num_labels
__lowercase= TFConvBertForTokenClassification(config=lowerCAmelCase )
__lowercase= {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _A (self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
__lowercase= TFConvBertForQuestionAnswering(config=lowerCAmelCase )
__lowercase= {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase= model(lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _A (self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class A ( _a , _a , unittest.TestCase ):
UpperCamelCase_ : List[Any] =(
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase_ : Tuple =(
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ : int =False
UpperCamelCase_ : List[Any] =False
UpperCamelCase_ : List[Any] =False
def _A (self ):
__lowercase= TFConvBertModelTester(self )
__lowercase= ConfigTester(self , config_class=lowerCAmelCase , hidden_size=3_7 )
def _A (self ):
self.config_tester.run_common_tests()
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase )
@slow
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs_for_common()
__lowercase= True
__lowercase= True
if hasattr(lowerCAmelCase , 'use_cache' ):
__lowercase= True
__lowercase= getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowercase= getattr(self.model_tester , 'key_length' , lowerCAmelCase )
for model_class in self.all_model_classes:
__lowercase= self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
__lowercase= model_class(lowerCAmelCase )
__lowercase= len(model(lowerCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCAmelCase , saved_model=lowerCAmelCase )
__lowercase= os.path.join(lowerCAmelCase , 'saved_model' , '1' )
__lowercase= tf.keras.models.load_model(lowerCAmelCase )
__lowercase= model(lowerCAmelCase )
if self.is_encoder_decoder:
__lowercase= outputs['encoder_hidden_states']
__lowercase= outputs['encoder_attentions']
else:
__lowercase= outputs['hidden_states']
__lowercase= outputs['attentions']
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
__lowercase= getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def _A (self ):
__lowercase= TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(lowerCAmelCase )
def _A (self ):
__lowercase= self.model_tester.prepare_config_and_inputs_for_common()
__lowercase= True
__lowercase= getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__lowercase= getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowercase= getattr(self.model_tester , 'key_length' , lowerCAmelCase )
__lowercase= getattr(self.model_tester , 'key_length' , lowerCAmelCase )
def check_decoder_attentions_output(lowerCAmelCase ):
__lowercase= len(lowerCAmelCase )
self.assertEqual(out_len % 2 , 0 )
__lowercase= outputs.decoder_attentions
self.assertEqual(len(lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(lowerCAmelCase ):
__lowercase= [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(lowerCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowercase= True
__lowercase= False
__lowercase= model_class(lowerCAmelCase )
__lowercase= model(self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
__lowercase= len(lowerCAmelCase )
self.assertEqual(config.output_hidden_states , lowerCAmelCase )
check_encoder_attentions_output(lowerCAmelCase )
if self.is_encoder_decoder:
__lowercase= model_class(lowerCAmelCase )
__lowercase= model(self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
self.assertEqual(config.output_hidden_states , lowerCAmelCase )
check_decoder_attentions_output(lowerCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowercase= True
__lowercase= model_class(lowerCAmelCase )
__lowercase= model(self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
self.assertEqual(config.output_hidden_states , lowerCAmelCase )
check_encoder_attentions_output(lowerCAmelCase )
# Check attention is always last and order is fine
__lowercase= True
__lowercase= True
__lowercase= model_class(lowerCAmelCase )
__lowercase= model(self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowerCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , lowerCAmelCase )
check_encoder_attentions_output(lowerCAmelCase )
@require_tf
class A ( unittest.TestCase ):
@slow
def _A (self ):
__lowercase= TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__lowercase= tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase= model(lowerCAmelCase )[0]
__lowercase= [1, 6, 7_6_8]
self.assertEqual(output.shape , lowerCAmelCase )
__lowercase= tf.constant(
[
[
[-0.03_47_54_93, -0.4_68_60_34, -0.30_63_88_32],
[0.22_63_72_48, -0.26_98_86_46, -0.7_42_34_24],
[0.10_32_48_68, -0.45_01_35_08, -0.58_28_07_84],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCAmelCase , atol=1E-4 )
| 368
|
def solution( power = 1_0_0_0 ) -> int:
    '''simple docstring'''
    num = 2**power
    string_num = str(num)
    list_num = list(string_num)
    sum_of_num = 0
    for i in list_num:
        sum_of_num += int(i)
    return sum_of_num
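# A quick worked example for the solver above: 2**15 == 32768 and
# 3 + 2 + 7 + 6 + 8 == 26.
assert solution(15) == 26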
if __name__ == "__main__":
    power = int(input('''Enter the power of 2: ''').strip())
    print('''2 ^ ''', power, ''' = ''', 2**power)
    result = solution(power)
    print('''Sum of the digits is: ''', result)
| 304
| 0
|
'''simple docstring'''
def a_ ( sentence: str , ngram_size: int ) -> list[str]:
    return [sentence[i : i + ngram_size] for i in range(len(sentence ) - ngram_size + 1 )]
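# A short usage sketch for the character-n-gram helper above: it slides a
# window of `ngram_size` characters, yielding len(sentence) - ngram_size + 1
# grams.
assert a_("I am a dog", 4) == ["I am", " am ", "am a", "m a ", " a d", "a do", " dog"]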
if __name__ == "__main__":
from doctest import testmod
testmod()
| 208
|
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ) -> np.array:
__lowerCamelCase : Any = F'{sampling_rate}'
__lowerCamelCase : List[str] = '1'
__lowerCamelCase : int = 'f32le'
__lowerCamelCase : Dict = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(_lowerCAmelCase ,stdin=subprocess.PIPE ,stdout=subprocess.PIPE ) as ffmpeg_process:
__lowerCamelCase : Tuple = ffmpeg_process.communicate(_lowerCAmelCase )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
__lowerCamelCase : Any = output_stream[0]
__lowerCamelCase : Union[str, Any] = np.frombuffer(_lowerCAmelCase ,np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
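# Hedged usage sketch for the decoder above (canonically `ffmpeg_read`; the
# dump reuses `a_` for every function in this file, so later definitions
# shadow it). It assumes a system ffmpeg on PATH and a real audio file:
#
#     with open("sample.flac", "rb") as f:  # hypothetical file
#         waveform = ffmpeg_read(f.read(), sampling_rate=16_000)
#     assert waveform.dtype == np.float32   # mono float32 at 16 kHz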
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = "f32le" ,) -> Dict:
__lowerCamelCase : Optional[Any] = F'{sampling_rate}'
__lowerCamelCase : Optional[int] = '1'
if format_for_conversion == "s16le":
__lowerCamelCase : List[Any] = 2
elif format_for_conversion == "f32le":
__lowerCamelCase : Tuple = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
__lowerCamelCase : Any = platform.system()
if system == "Linux":
__lowerCamelCase : Tuple = 'alsa'
__lowerCamelCase : Optional[Any] = 'default'
elif system == "Darwin":
__lowerCamelCase : Union[str, Any] = 'avfoundation'
__lowerCamelCase : Tuple = ':0'
elif system == "Windows":
__lowerCamelCase : List[str] = 'dshow'
__lowerCamelCase : Optional[Any] = 'default'
__lowerCamelCase : Optional[int] = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
__lowerCamelCase : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
__lowerCamelCase : int = _ffmpeg_stream(_lowerCAmelCase ,_lowerCAmelCase )
for item in iterator:
yield item
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = None ,_lowerCAmelCase = None ,_lowerCAmelCase = "f32le" ,) -> List[str]:
if stream_chunk_s is not None:
__lowerCamelCase : int = stream_chunk_s
else:
__lowerCamelCase : List[Any] = chunk_length_s
__lowerCamelCase : Dict = ffmpeg_microphone(_lowerCAmelCase ,_lowerCAmelCase ,format_for_conversion=_lowerCAmelCase )
if format_for_conversion == "s16le":
__lowerCamelCase : List[str] = np.intaa
__lowerCamelCase : Union[str, Any] = 2
elif format_for_conversion == "f32le":
__lowerCamelCase : Union[str, Any] = np.floataa
__lowerCamelCase : Optional[Any] = 4
else:
raise ValueError(F'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`' )
if stride_length_s is None:
__lowerCamelCase : Any = chunk_length_s / 6
__lowerCamelCase : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(_lowerCAmelCase ,(int, float) ):
__lowerCamelCase : Tuple = [stride_length_s, stride_length_s]
__lowerCamelCase : Union[str, Any] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
__lowerCamelCase : Optional[Any] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
__lowerCamelCase : Dict = datetime.datetime.now()
__lowerCamelCase : Any = datetime.timedelta(seconds=_lowerCAmelCase )
for item in chunk_bytes_iter(_lowerCAmelCase ,_lowerCAmelCase ,stride=(stride_left, stride_right) ,stream=_lowerCAmelCase ):
# Put everything back in numpy scale
__lowerCamelCase : Optional[int] = np.frombuffer(item['raw'] ,dtype=_lowerCAmelCase )
__lowerCamelCase : Tuple = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
__lowerCamelCase : Optional[int] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase = False ) -> str:
__lowerCamelCase : Optional[int] = b''
__lowerCamelCase ,__lowerCamelCase : Any = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}' )
__lowerCamelCase : str = 0
for raw in iterator:
acc += raw
if stream and len(_lowerCAmelCase ) < chunk_len:
__lowerCamelCase : Any = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(_lowerCAmelCase ) >= chunk_len:
# We are flushing the accumulator
__lowerCamelCase : Any = (_stride_left, stride_right)
__lowerCamelCase : Optional[int] = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
__lowerCamelCase : List[str] = False
yield item
__lowerCamelCase : Tuple = stride_left
__lowerCamelCase : Union[str, Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(_lowerCAmelCase ) > stride_left:
__lowerCamelCase : Tuple = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
__lowerCamelCase : List[str] = False
yield item
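# A worked trace of the chunker above (canonically `chunk_bytes_iter`),
# assuming its reference implementation. With chunk_len=4 and stride=(1, 1),
# consecutive chunks overlap by the strides, and the trailing remainder
# keeps only a left stride:
#
#     chunks = list(chunk_bytes_iter(iter([b"abcdefgh"]), 4, (1, 1)))
#     # -> {"raw": b"abcd", "stride": (0, 1)}
#     #    {"raw": b"cdef", "stride": (1, 1)}
#     #    {"raw": b"efgh", "stride": (1, 1)}
#     #    {"raw": b"gh",   "stride": (1, 0)}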
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ) -> Tuple:
__lowerCamelCase : int = 2**24 # 16Mo
try:
with subprocess.Popen(_lowerCAmelCase ,stdout=subprocess.PIPE ,bufsize=_lowerCAmelCase ) as ffmpeg_process:
while True:
__lowerCamelCase : Union[str, Any] = ffmpeg_process.stdout.read(_lowerCAmelCase )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
| 208
| 1
|
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase (__snake_case ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=32 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=37 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.02 , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase="None" , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
lowercase__: Dict = parent
lowercase__: Dict = batch_size
lowercase__: Optional[int] = seq_length
lowercase__: Union[str, Any] = is_training
lowercase__: int = use_input_mask
lowercase__: int = use_token_type_ids
lowercase__: Tuple = use_labels
lowercase__: Any = vocab_size
lowercase__: Union[str, Any] = hidden_size
lowercase__: Optional[Any] = num_hidden_layers
lowercase__: Tuple = num_attention_heads
lowercase__: Optional[int] = intermediate_size
lowercase__: List[Any] = hidden_act
lowercase__: Tuple = hidden_dropout_prob
lowercase__: Any = attention_probs_dropout_prob
lowercase__: Any = max_position_embeddings
lowercase__: Union[str, Any] = type_vocab_size
lowercase__: Optional[int] = type_sequence_label_size
lowercase__: int = initializer_range
lowercase__: Dict = num_labels
lowercase__: List[str] = num_choices
lowercase__: int = relative_attention
lowercase__: List[Any] = position_biased_input
lowercase__: Dict = pos_att_type
lowercase__: Optional[Any] = scope
def _snake_case ( self ):
lowercase__: str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__: str = None
if self.use_input_mask:
lowercase__: Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowercase__: Dict = None
if self.use_token_type_ids:
lowercase__: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__: Any = None
lowercase__: int = None
lowercase__: Tuple = None
if self.use_labels:
lowercase__: int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__: List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__: Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase__: List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self ):
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _snake_case ( self ):
lowercase__: Tuple = self.get_config()
lowercase__: str = 300
return config
def _snake_case ( self , _UpperCAmelCase ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[str] = DebertaModel(config=a_ )
model.to(a_ )
model.eval()
lowercase__: Optional[Any] = model(a_ , attention_mask=a_ , token_type_ids=a_ )[0]
lowercase__: str = model(a_ , token_type_ids=a_ )[0]
lowercase__: Dict = model(a_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Optional[int] = DebertaForMaskedLM(config=a_ )
model.to(a_ )
model.eval()
lowercase__: List[Any] = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Tuple = self.num_labels
lowercase__: Union[str, Any] = DebertaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
lowercase__: Optional[int] = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(a_ )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: int = self.num_labels
lowercase__: str = DebertaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
lowercase__: Union[str, Any] = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
lowercase__: List[str] = DebertaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
lowercase__: Any = model(
a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class UpperCAmelCase (__snake_case ,__snake_case ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :Union[str, Any] = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_UpperCAmelCase :List[Any] = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCAmelCase :Optional[int] = True
_UpperCAmelCase :Union[str, Any] = False
_UpperCAmelCase :Tuple = False
_UpperCAmelCase :Union[str, Any] = False
_UpperCAmelCase :Dict = False
def _snake_case ( self ):
lowercase__: List[str] = DebertaModelTester(self )
lowercase__: List[str] = ConfigTester(self , config_class=a_ , hidden_size=37 )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
lowercase__: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*a_ )
def _snake_case ( self ):
lowercase__: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*a_ )
def _snake_case ( self ):
lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*a_ )
def _snake_case ( self ):
lowercase__: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*a_ )
def _snake_case ( self ):
lowercase__: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*a_ )
@slow
def _snake_case ( self ):
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__: Tuple = DebertaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason='''Model not available yet''' )
def _snake_case ( self ):
pass
@slow
def _snake_case ( self ):
lowercase__: Any = DebertaModel.from_pretrained('''microsoft/deberta-base''' )
lowercase__: str = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
lowercase__: Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase__: Optional[int] = model(a_ , attention_mask=a_ )[0]
# compare the actual values for a slice.
lowercase__: int = torch.tensor(
[[[-0.5_986, -0.8_055, -0.8_462], [1.4_484, -0.9_348, -0.8_059], [0.3_123, 0.0_032, -1.4_131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1e-4 ) , F"""{output[:, 1:4, 1:4]}""" )
| 351
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
"Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
"Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
"Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
"Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
"Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
"Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
"Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
"Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
"Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
"Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
"Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :List[str] = "codegen"
_UpperCAmelCase :Optional[int] = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _UpperCAmelCase=50400 , _UpperCAmelCase=2048 , _UpperCAmelCase=2048 , _UpperCAmelCase=4096 , _UpperCAmelCase=28 , _UpperCAmelCase=16 , _UpperCAmelCase=64 , _UpperCAmelCase=None , _UpperCAmelCase="gelu_new" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=1e-5 , _UpperCAmelCase=0.02 , _UpperCAmelCase=True , _UpperCAmelCase=50256 , _UpperCAmelCase=50256 , _UpperCAmelCase=False , **_UpperCAmelCase , ):
lowercase__: int = vocab_size
lowercase__: str = n_ctx
lowercase__: List[Any] = n_positions
lowercase__: Union[str, Any] = n_embd
lowercase__: Optional[Any] = n_layer
lowercase__: str = n_head
lowercase__: List[Any] = n_inner
lowercase__: Union[str, Any] = rotary_dim
lowercase__: Optional[Any] = activation_function
lowercase__: Union[str, Any] = resid_pdrop
lowercase__: Optional[int] = embd_pdrop
lowercase__: Optional[Any] = attn_pdrop
lowercase__: Optional[int] = layer_norm_epsilon
lowercase__: List[Any] = initializer_range
lowercase__: Tuple = use_cache
lowercase__: Any = bos_token_id
lowercase__: Any = eos_token_id
super().__init__(
bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , **_UpperCAmelCase )
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase = "default" , _UpperCAmelCase = None , _UpperCAmelCase = False , ):
super().__init__(_UpperCAmelCase , task=_UpperCAmelCase , patching_specs=_UpperCAmelCase , use_past=_UpperCAmelCase )
if not getattr(self._config , '''pad_token_id''' , _UpperCAmelCase ):
# TODO: how to do that better?
lowercase__: Any = 0
@property
def _snake_case ( self ):
lowercase__: int = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(_UpperCAmelCase , direction='''inputs''' )
lowercase__: int = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
lowercase__: Tuple = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def _snake_case ( self ):
return self._config.n_layer
@property
def _snake_case ( self ):
return self._config.n_head
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase = -1 , _UpperCAmelCase = -1 , _UpperCAmelCase = False , _UpperCAmelCase = None , ):
lowercase__: Optional[int] = super(_UpperCAmelCase , self ).generate_dummy_inputs(
_UpperCAmelCase , batch_size=_UpperCAmelCase , seq_length=_UpperCAmelCase , is_pair=_UpperCAmelCase , framework=_UpperCAmelCase )
# We need to order the input in the way they appears in the forward()
lowercase__: List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
lowercase__, lowercase__: Union[str, Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
lowercase__: Any = seqlen + 2
lowercase__: List[str] = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
lowercase__: Optional[Any] = [
(torch.zeros(_UpperCAmelCase ), torch.zeros(_UpperCAmelCase )) for _ in range(self.num_layers )
]
lowercase__: Optional[Any] = common_inputs['''attention_mask''']
if self.use_past:
lowercase__: List[str] = ordered_inputs['''attention_mask'''].dtype
lowercase__: List[Any] = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(_UpperCAmelCase , _UpperCAmelCase , dtype=_UpperCAmelCase )] , dim=1 )
return ordered_inputs
@property
def _snake_case ( self ):
return 13
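# Hedged sketch of the past-key-values plumbing above, assuming the
# canonical class names (CodeGenConfig / CodeGenOnnxConfig), which this
# dump obfuscates. With use_past=True the exporter appends n_layer
# (key, value) tensor pairs and widens the attention mask by the past
# length:
#
#     from transformers import AutoTokenizer, CodeGenConfig
#     config = CodeGenConfig(n_layer=2, n_head=4, n_embd=64)
#     onnx_config = CodeGenOnnxConfig(config, use_past=True)
#     tok = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#     dummy = onnx_config.generate_dummy_inputs(
#         tok, batch_size=1, seq_length=8, framework=TensorType.PYTORCH
#     )
#     # dummy: input_ids, n_layer past (key, value) pairs, widened attention_mask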
| 2
| 0
|
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A ,"width_multiplier" ) )
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : List[str] ,A : List[Any] ,A : Optional[int]=13 ,A : Dict=64 ,A : Optional[Any]=2 ,A : Optional[int]=3 ,A : int="swish" ,A : Tuple=3 ,A : Tuple=32 ,A : int=0.1 ,A : Any=0.02 ,A : Any=True ,A : Optional[int]=True ,A : Tuple=10 ,A : Any=None ,A : Any=0.25 ,A : Tuple=0.0 ,A : Optional[int]=0.0 ,):
__A = parent
__A = batch_size
__A = image_size
__A = patch_size
__A = num_channels
__A = make_divisible(5_12 * width_multiplier ,divisor=8 )
__A = hidden_act
__A = conv_kernel_size
__A = output_stride
__A = classifier_dropout_prob
__A = use_labels
__A = is_training
__A = num_labels
__A = initializer_range
__A = scope
__A = width_multiplier
__A = ffn_dropout
__A = attn_dropout
def UpperCamelCase_ ( self : Tuple ):
__A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] ,self.num_labels )
__A = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
__A = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCamelCase_ ( self : List[Any] ):
return MobileViTVaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_act=self.hidden_act ,conv_kernel_size=self.conv_kernel_size ,output_stride=self.output_stride ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,width_multiplier=self.width_multiplier ,ffn_dropout=self.ffn_dropout_prob ,attn_dropout=self.attn_dropout_prob ,)
def UpperCamelCase_ ( self : int ,A : Any ,A : Any ,A : Union[str, Any] ,A : Optional[int] ):
__A = MobileViTVaModel(config=A )
model.to(A )
model.eval()
__A = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape ,(
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def UpperCamelCase_ ( self : List[Any] ,A : Optional[Any] ,A : Optional[Any] ,A : int ,A : Tuple ):
__A = self.num_labels
__A = MobileViTVaForImageClassification(A )
model.to(A )
model.eval()
__A = model(A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCamelCase_ ( self : List[Any] ,A : Dict ,A : List[str] ,A : Union[str, Any] ,A : int ):
__A = self.num_labels
__A = MobileViTVaForSemanticSegmentation(A )
model.to(A )
model.eval()
__A = model(A )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
__A = model(A ,labels=A )
self.parent.assertEqual(
result.logits.shape ,(
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) ,)
def UpperCamelCase_ ( self : Dict ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
snake_case_ = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = MobileViTVaModelTester(self )
__A = MobileViTVaConfigTester(self ,config_class=A ,has_text_modality=A )
def UpperCamelCase_ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def UpperCamelCase_ ( self : List[str] ):
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def UpperCamelCase_ ( self : List[Any] ):
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def UpperCamelCase_ ( self : List[Any] ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def UpperCamelCase_ ( self : int ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCamelCase_ ( self : Optional[int] ):
pass
def UpperCamelCase_ ( self : Dict ):
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = model_class(A )
__A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A = [*signature.parameters.keys()]
__A = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,A )
def UpperCamelCase_ ( self : Any ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self : Optional[Any] ):
def check_hidden_states_output(A : Dict ,A : Optional[int] ,A : Any ):
__A = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
__A = model(**self._prepare_for_class(A ,A ) )
__A = outputs.hidden_states
__A = 5
self.assertEqual(len(A ) ,A )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__A = 2
for i in range(len(A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) ,[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,)
divisor *= 2
self.assertEqual(self.model_tester.output_stride ,divisor // 2 )
__A , __A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A = True
check_hidden_states_output(A ,A ,A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A = True
check_hidden_states_output(A ,A ,A )
def UpperCamelCase_ ( self : Any ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCamelCase_ ( self : Optional[int] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A = MobileViTVaModel.from_pretrained(A )
self.assertIsNotNone(A )
def UpperCAmelCase ( ) -> Union[str, Any]:
"""simple docstring"""
__A = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase_ ( self : Any ):
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
@slow
def UpperCamelCase_ ( self : str ):
__A = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
A )
__A = self.default_image_processor
__A = prepare_img()
__A = image_processor(images=A ,return_tensors="pt" ).to(A )
# forward pass
with torch.no_grad():
__A = model(**A )
# verify the logits
__A = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape ,A )
__A = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,A ,atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self : int ):
__A = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
__A = model.to(A )
__A = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
__A = prepare_img()
__A = image_processor(images=A ,return_tensors="pt" ).to(A )
# forward pass
with torch.no_grad():
__A = model(**A )
__A = outputs.logits
# verify the logits
__A = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape ,A )
__A = torch.tensor(
[
[[7.08_63, 7.15_25, 6.82_01], [6.69_31, 6.87_70, 6.89_33], [6.29_78, 7.03_66, 6.96_36]],
[[-3.71_34, -3.67_12, -3.66_75], [-3.58_25, -3.35_49, -3.47_77], [-3.34_35, -3.39_79, -3.28_57]],
[[-2.93_29, -2.80_03, -2.73_69], [-3.05_64, -2.47_80, -2.02_07], [-2.68_89, -1.92_98, -1.76_40]],
] ,device=A ,)
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,A ,atol=1E-4 ) )
@slow
def UpperCamelCase_ ( self : List[Any] ):
__A = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
__A = model.to(A )
__A = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
__A = prepare_img()
__A = image_processor(images=A ,return_tensors="pt" ).to(A )
# forward pass
with torch.no_grad():
__A = model(**A )
__A = outputs.logits.detach().cpu()
__A = image_processor.post_process_semantic_segmentation(outputs=A ,target_sizes=[(50, 60)] )
__A = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape ,A )
__A = image_processor.post_process_semantic_segmentation(outputs=A )
__A = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape ,A )
| 15
|
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def UpperCAmelCase ( a_ ) -> str:
"""simple docstring"""
__A = {}
__A = job["started_at"]
__A = job["completed_at"]
__A = date_parser.parse(a_ )
__A = date_parser.parse(a_ )
__A = round((end_datetime - start_datetime).total_seconds() / 60.0 )
__A = start
__A = end
__A = duration_in_min
return job_info
def UpperCAmelCase ( a_ , a_=None ) -> str:
"""simple docstring"""
__A = None
if token is not None:
__A = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
__A = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
__A = requests.get(a_ , headers=a_ ).json()
__A = {}
try:
job_time.update({job["name"]: extract_time_from_single_job(a_ ) for job in result["jobs"]} )
__A = math.ceil((result["total_count"] - 1_0_0) / 1_0_0 )
for i in range(a_ ):
__A = requests.get(url + F'''&page={i + 2}''' , headers=a_ ).json()
job_time.update({job["name"]: extract_time_from_single_job(a_ ) for job in result["jobs"]} )
return job_time
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
SCREAMING_SNAKE_CASE :Optional[int] = parser.parse_args()
SCREAMING_SNAKE_CASE :Union[str, Any] = get_job_time(args.workflow_run_id)
SCREAMING_SNAKE_CASE :Optional[int] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(f'''{k}: {v["duration"]}''')
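# Hedged usage sketch (the script filename and run id below are
# hypothetical; a token is only needed for private repos or higher rate
# limits):
#
#     python get_ci_job_times.py --workflow_run_id 1234567890
#     # prints one "<job name>: <minutes>" line per job, longest first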
| 15
| 1
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
__UpperCamelCase : Optional[int] = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json',
'allenai/longformer-large-4096': 'https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json',
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'
),
}
class LongformerConfig(PretrainedConfig):
    model_type = """longformer"""
    def __init__( self , attention_window: Union[List[int], int] = 512 , sep_token_id: int = 2 , pad_token_id: int = 1 , bos_token_id: int = 0 , eos_token_id: int = 2 , vocab_size: int = 30522 , hidden_size: int = 768 , num_hidden_layers: int = 12 , num_attention_heads: int = 12 , intermediate_size: int = 3072 , hidden_act: str = "gelu" , hidden_dropout_prob: float = 0.1 , attention_probs_dropout_prob: float = 0.1 , max_position_embeddings: int = 512 , type_vocab_size: int = 2 , initializer_range: float = 0.02 , layer_norm_eps: float = 1E-12 , onnx_export: bool = False , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__( self , config: "PretrainedConfig" , task: str = "default" , patching_specs: "List[PatchingSpec]" = None ):
        '''simple docstring'''
        super().__init__(config , task , patching_specs )
        config.onnx_export = True
@property
    def inputs( self ):
        '''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''global_attention_mask''', dynamic_axis),
] )
@property
    def outputs( self ):
        '''simple docstring'''
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: '''batch'''}
        return outputs
@property
    def atol_for_validation( self ):
'''simple docstring'''
return 1E-4
@property
    def default_onnx_opset( self ):
'''simple docstring'''
return max(super().default_onnx_opset , 14 )
    def generate_dummy_inputs( self , preprocessor: "PreTrainedTokenizerBase" , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        '''simple docstring'''
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs['''input_ids'''] )
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
return inputs
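    # Sketch of the resulting dummy mask (illustrative shapes, not from the original
    # file): for input_ids of shape (2, 8), global_attention_mask becomes
    # [[1, 0, 1, 0, 1, 0, 1, 0], [1, 0, 1, 0, 1, 0, 1, 0]],
    # i.e. every token at an even position attends globally.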
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert( src_path , map_location = "cpu" , save_path = None ):
    """Cast every tensor in a saved state dict to float16 and save it back to disk."""
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )
if __name__ == "__main__":
fire.Fire(convert)
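# A hypothetical invocation (paths are made up for illustration):
#   python fp16_conversion.py pytorch_model.bin --save_path pytorch_model.fp16.bin
# Omitting --save_path overwrites the input file in place.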
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=128 , max_relative_position=32 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
"""simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
"""simple docstring"""
        return NezhaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self ):
"""simple docstring"""
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = NezhaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = NezhaModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = NezhaForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = NezhaForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )

    def create_and_check_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = NezhaForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )

    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = NezhaForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': NezhaModel,
'fill-mask': NezhaForMaskedLM,
'question-answering': NezhaForQuestionAnswering,
'text-classification': NezhaForSequenceClassification,
'token-classification': NezhaForTokenClassification,
'zero-shot': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        self.model_tester = NezhaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=NezhaConfig , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_model_as_decoder( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs )

    def test_model_as_decoder_with_default_input_mask( self ):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_next_sequence_prediction( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs )

    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )

    @slow
    @require_torch_gpu
    def test_torchscript_device_change( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config )
            inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            traced_model = torch.jit.trace(
                model , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model , os.path.join(tmp , "bert.pt" ) )
                loaded = torch.jit.load(os.path.join(tmp , "bert.pt" ) , map_location=torch_device )
                loaded(inputs_dict["input_ids"].to(torch_device ) , inputs_dict["attention_mask"].to(torch_device ) )
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_nezha_model( self ):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base" )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
@slow
    def test_inference_nezha_masked_lm( self ):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base" )
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 6, 21128) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1e-4 ) )
def check_bouncy( num: int ) -> bool:
    """Return True if `num` is bouncy, i.e. its digits are neither increasing nor decreasing."""
    if not isinstance(num , int ):
        raise ValueError("check_bouncy() accepts only integer arguments" )
    str_n = str(num )
    sorted_str_n = "".join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def solution( percent: float = 99 ) -> int:
    """Return the least number for which the proportion of bouncy numbers reaches `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100" )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'{solution(99)}')
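# Quick illustration of check_bouncy (not part of the original file): 101 is bouncy
# because its digits neither rise nor fall monotonically, while 123 and 321 are not.
# assert check_bouncy(101) and not check_bouncy(123) and not check_bouncy(321)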
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__( self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths )

    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values

    def get_config( self ):
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )

    def create_and_check_model( self , config , pixel_values ):
        model = FlaxRegNetModel(config=config )
        result = model(pixel_values )
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def create_and_check_for_image_classification( self , config , pixel_values ):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin , unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = FlaxRegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
    def test_config( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
return
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
    def test_inputs_embeds( self ):
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
    def test_model_common_attributes( self ):
pass
    def test_forward_signature( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )

        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_jit_compilation( self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
                model = model_class(config )

                @jax.jit
                def model_jitted(pixel_values , **kwargs ):
                    return model(pixel_values=pixel_values , **kwargs )

                with self.subTest("""JIT Enabled""" ):
                    jitted_outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                with self.subTest("""JIT Disabled""" ):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict ).to_tuple()
                self.assertEqual(len(outputs ) , len(jitted_outputs ) )
                for jitted_output, output in zip(jitted_outputs , outputs ):
                    self.assertEqual(jitted_output.shape , output.shape )
def prepare_img():
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor( self ):
return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
        model = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""np""" )
        outputs = model(**inputs )
        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836] )
        self.assertTrue(jnp.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
@parameterized.expand([(None,), ("""foo.json""",)] )
    def test_save_load_config( self , config_name ):
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir , config_name=config_name )
            loaded_config = GenerationConfig.from_pretrained(tmp_dir , config_name=config_name )
        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample , True )
        self.assertEqual(loaded_config.temperature , 0.7 )
        self.assertEqual(loaded_config.length_penalty , 1.0 )
        self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k , 50 )
        self.assertEqual(loaded_config.max_length , 20 )
        self.assertEqual(loaded_config.max_time , None )
    def test_from_model_config( self ):
        model_config = AutoConfig.from_pretrained("""gpt2""" )
        generation_config_from_model = GenerationConfig.from_model_config(model_config )
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model , default_generation_config )
        # One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
    def test_update( self ):
        generation_config = GenerationConfig()
        update_kwargs = {
            """max_new_tokens""": 1024,
            """foo""": """bar""",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs )
        unused_kwargs = generation_config.update(**update_kwargs )
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs , update_kwargs_copy )
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens , 1024 )
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs , {"""foo""": """bar"""} )
    def test_initialize_new_kwargs( self ):
        generation_config = GenerationConfig()
        generation_config.foo = """bar"""
        with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
            generation_config.save_pretrained(tmp_dir )
            new_config = GenerationConfig.from_pretrained(tmp_dir )
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo , """bar""" )
        generation_config = GenerationConfig.from_model_config(new_config )
        assert not hasattr(generation_config , """foo""" )  # no new kwargs should be initialized if from config
    def test_kwarg_init( self ):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature , 1.0 )
        self.assertEqual(default_config.do_sample , False )
        self.assertEqual(default_config.num_beams , 1 )
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
        self.assertEqual(config.temperature , 0.7 )
        self.assertEqual(config.do_sample , True )
        self.assertEqual(config.num_beams , 1 )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir )
            loaded_config = GenerationConfig.from_pretrained(tmp_dir , temperature=1.0 )
        self.assertEqual(loaded_config.temperature , 1.0 )
        self.assertEqual(loaded_config.do_sample , True )
        self.assertEqual(loaded_config.num_beams , 1 )  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
@classmethod
    def setUpClass( cls ):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
    def tearDownClass( cls ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
    def test_push_to_hub( self ):
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , )
        config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
        new_config = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="""test-generation-config""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id="""test-generation-config""" , push_to_hub=True , use_auth_token=self._token )
        new_config = GenerationConfig.from_pretrained(F'{USER}/test-generation-config' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
    def test_push_to_hub_in_organization( self ):
        config = GenerationConfig(
            do_sample=True , temperature=0.7 , length_penalty=1.0 , )
        config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
        new_config = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
        # Reset repo
        delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=True , use_auth_token=self._token )
        new_config = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v , getattr(new_config , k ) )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__( self , vocab_size=50358 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=2 , eos_token_id=1 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
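# Minimal usage sketch (not from the original file; values are illustrative):
#   config = BertGenerationConfig(hidden_size=512, num_hidden_layers=8)
# Any parameter left out keeps the default shown in the signature above.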
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
_UpperCamelCase : Any = logging.getLogger(__name__)
class MaskedBertConfig(PretrainedConfig):
    model_type = "masked_bert"

    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , pruning_method="topK" , mask_init="constant" , mask_scale=0.0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace( s , old , new , occurrence ):
    """Replace the last `occurrence` occurrences of `old` in `s` with `new`."""
    li = s.rsplit(old , occurrence )
    return new.join(li )


def count_parameters( state_dict ):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )


def upgrade_state_dict( state_dict ):
    """Rename original DALL-E encoder keys to their FLAVA codebook equivalents."""
    upgrade = {}
    group_keys = ['''group_1''', '''group_2''', '''group_3''', '''group_4''']
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(F'''{group_key}.''' , F'''{group_key}.group.''' )
        if "res_path" in key:
            key = key.replace("""res_path.""" , """res_path.path.""" )
        if key.endswith(""".w""" ):
            key = rreplace(key , """.w""" , """.weight""" , 1 )
        if key.endswith(""".b""" ):
            key = rreplace(key , """.b""" , """.bias""" , 1 )
        upgrade[key] = value.float()
    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , save_checkpoint=True ):
    '''simple docstring'''
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path ):
        ckpt = torch.load(checkpoint_path )
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path )
    if isinstance(ckpt , Encoder ):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt )
    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path )
    else:
        config = FlavaImageCodebookConfig()
    hf_model = FlavaImageCodebook(config ).eval()
    state_dict = encoder.state_dict()
    hf_state_dict = upgrade_state_dict(state_dict )
    hf_model.load_state_dict(hf_state_dict )
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict )
    state_dict_count = count_parameters(state_dict )
    assert torch.allclose(hf_count , state_dict_count , atol=1e-3 )
    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path )
    else:
        return hf_state_dict
if __name__ == "__main__":
lowerCAmelCase_ : str = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
lowerCAmelCase_ : Dict = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
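# A hypothetical invocation (paths are made up for illustration):
#   python convert_dalle_to_flava_codebook.py --checkpoint_path encoder.pkl \
#       --pytorch_dump_folder_path ./flava-codebook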
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    '''simple docstring'''
    parser = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=False )
    subparsers = parser.add_subparsers(help="""accelerate command helpers""" )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
if __name__ == "__main__":
main()
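# Typical subcommands routed by the parser above (sketch):
#   accelerate config, accelerate env, accelerate launch train.py, accelerate test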
from collections import defaultdict
from math import gcd
def solution( limit: int = 1500000 ) -> int:
    frequencies = defaultdict(int )
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1 , euclid_m , 2 ):
            if gcd(euclid_m , euclid_n ) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter , limit + 1 , primitive_perimeter ):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1 )
if __name__ == "__main__":
print(F'{solution() = }')
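# Illustration (not from the original file): perimeter 12 admits exactly one
# integer right triangle (3, 4, 5), so it is counted; perimeter 120 admits three
# -- (30, 40, 50), (20, 48, 52), (24, 45, 51) -- so it is not.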
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : str = logging.get_logger(__name__)
lowerCamelCase : int = {
'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json',
}
class MvpConfig(PretrainedConfig ):
'''simple docstring'''
    model_type = """mvp"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__(self , vocab_size=50267 , max_position_embeddings=1024 , encoder_layers=12 , encoder_ffn_dim=4096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , use_prompt=False , prompt_length=100 , prompt_mid_dim=800 , **kwargs , ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , False ):
            self.forced_bos_token_id = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
'''The config can simply be saved and uploaded again to be fixed.''' )
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset ):
"""simple docstring"""
    def __init__( self , path="" , prefix="train" ):
        """List all the documents to summarize; files are read lazily in __getitem__."""
        assert os.path.isdir(path )
        self.documents = []
        story_filenames_list = os.listdir(path )
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path , story_filename )
            if not os.path.isfile(path_to_story ):
                continue
            self.documents.append(path_to_story )

    def __len__( self ):
        """Returns the number of documents."""
        return len(self.documents )

    def __getitem__( self , idx ):
        document_path = self.documents[idx]
        document_name = document_path.split("/" )[-1]
        with open(document_path , encoding="utf-8" ) as source:
            raw_story = source.read()
        story_lines , summary_lines = process_story(raw_story )
        return document_name, story_lines, summary_lines


def process_story( raw_story ):
    """Split a raw story file into article lines and summary (highlight) lines."""
    nonempty_lines = list(filter(lambda x: len(x ) != 0 , [line.strip() for line in raw_story.split("\n" )] ) )
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line ) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines )
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight" ):
                break
            story_lines.append(element )
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight" ) , lines ) )
    return story_lines, summary_lines


def _add_missing_period( line ):
    END_TOKENS = [".", "!", "?", "...", "'", "`", "\"", "\u2019", "\u2019", ")"]
    if line.startswith("@highlight" ):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def truncate_or_pad( sequence , block_size , pad_token_id ):
    """Adapt the sequence length to the block size: truncate or pad with pad_token_id."""
    if len(sequence ) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence )) )
        return sequence


def build_mask( sequence , pad_token_id ):
    """Build the attention mask: 1 for real tokens, 0 for padding tokens."""
    mask = torch.ones_like(sequence )
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization( story_lines , summary_lines , tokenizer ):
    """Tokenize the story and summary lines and flatten them into single id sequences."""
    story_lines_token_ids = [tokenizer.encode(line ) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line ) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids( batch , separator_token_id ):
    """Alternate 0 and 1 segment ids between sentences, switching at each separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2 )
        batch_embeddings.append(embeddings )
    return torch.tensor(batch_embeddings )
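# Illustration of compute_token_type_ids (separator id 101; other values made up):
# the sequence [101, 5, 6, 101, 8] yields segment ids [0, 0, 0, 1, 1].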
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase ):
"""simple docstring"""
    def test_all_is_compatible( self ):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )

    def test_diffusers_model_is_compatible( self ):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )

    def test_diffusers_model_is_not_compatible( self ):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames ) )

    def test_transformer_model_is_compatible( self ):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames ) )

    def test_transformer_model_is_not_compatible( self ):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames ) )

    def test_all_is_compatible_variant( self ):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def test_diffusers_model_is_compatible_variant( self ):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def test_diffusers_model_is_compatible_variant_partial( self ):
        # non-variant filenames are still accepted when a variant is requested
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def test_diffusers_model_is_not_compatible_variant( self ):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )

    def test_transformer_model_is_compatible_variant( self ):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def test_transformer_model_is_compatible_variant_partial( self ):
        # non-variant filenames are still accepted when a variant is requested
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames , variant=variant ) )

    def test_transformer_model_is_not_compatible_variant( self ):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames , variant=variant ) )
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase : List[Any] = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trajectory_transformer'] = [
        'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TrajectoryTransformerModel',
        'TrajectoryTransformerPreTrainedModel',
        'load_tf_weights_in_trajectory_transformer',
    ]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
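The `_LazyModule` pattern above defers the heavy torch-backed imports until the first attribute access. A bare-bones illustration of the idea (not the real implementation) is:

import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    """Minimal sketch of deferred imports keyed by an import-structure dict."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # invert {submodule: [names]} into {name: submodule}
        self._class_to_module = {v: k for k, vals in import_structure.items() for v in vals}

    def __getattr__(self, attr):
        if attr not in self._class_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        module = importlib.import_module(f"{self.__name__}.{self._class_to_module[attr]}")
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so the next access is a plain lookup
        return value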
| 258
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            num_train_timesteps=1000,
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
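For orientation: CycleDiffusion edits a real image by noising it under the `source_prompt` and denoising it under the target `prompt`, which is why every call above supplies both prompts with separate `guidance_scale` and `source_guidance_scale` values, and why `strength` bounds how far the input image is perturbed. Each denoising step roughly combines conditional and unconditional predictions as `pred = uncond + scale * (cond - uncond)`; this description is a simplification of the pipeline's actual logic.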
| 258
| 1
|
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)


def is_sagemaker_model_parallel_available():
    # Get the sagemaker-specific model-parallel parameters from the SM_HP_MP_PARAMETERS variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check that the field "partitions" is included; it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker-specific framework parameters from the SM_FRAMEWORK_PARAMS variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_mpi_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )
@cached_property
def snake_case_ (self ) -> "torch.device":
logger.info("PyTorch: setting up devices" )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
"torch.distributed process group is initialized, but local_rank == -1. "
"In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
if self.no_cuda:
UpperCamelCase = torch.device("cpu" )
UpperCamelCase = 0
elif is_sagemaker_model_parallel_available():
UpperCamelCase = smp.local_rank()
UpperCamelCase = torch.device("cuda" , __a )
UpperCamelCase = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
UpperCamelCase = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
UpperCamelCase = torch.device("cuda" , self.local_rank )
UpperCamelCase = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
UpperCamelCase = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
UpperCamelCase = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
UpperCamelCase = torch.device("cuda" , self.local_rank )
UpperCamelCase = 1
if device.type == "cuda":
torch.cuda.set_device(__a )
return device
@property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
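For reference, a small illustration of the environment that the availability check at the top of this file inspects. The JSON values are invented for the example:

if __name__ == "__main__":
    import json
    import os

    os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2, "microbatches": 4})
    os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})
    # With the `smdistributed` package installed, the check would now succeed:
    print(is_sagemaker_model_parallel_available())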
| 244
|
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType('DataClass', Any)
DataClassType = NewType('DataClassType', Any)
def string_to_bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices):
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases=None,
    help=None,
    default=dataclasses.MISSING,
    default_factory=dataclasses.MISSING,
    metadata=None,
    **kwargs,
):
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types, **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
@staticmethod
    def _parse_dataclass_field(parser, field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)
    def parse_dict(self, args, allow_extra_keys=False) -> Tuple[DataClass, ...]:
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file, allow_extra_keys=False) -> Tuple[DataClass, ...]:
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file, allow_extra_keys=False) -> Tuple[DataClass, ...]:
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
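A minimal usage sketch; the dataclass here is invented for illustration:

if __name__ == "__main__":
    @dataclasses.dataclass
    class ExampleArguments:
        learning_rate: float = 3e-4
        fp16: bool = False

    parser = HfArgumentParser(ExampleArguments)
    (example_args,) = parser.parse_args_into_dataclasses(["--learning_rate", "1e-3", "--fp16", "true"])
    print(example_args)  # ExampleArguments(learning_rate=0.001, fp16=True)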
| 244
| 1
|
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if it is safe to place digit ``n`` at ``grid[row][column]``."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Find the first empty cell (marked 0), or None if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid in place by backtracking; return the grid, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
| 90
|
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.')
        self.model = model
        self.model_save_dir = kwargs.get('model_save_dir', None)
        self.latest_model_name = kwargs.get('latest_model_name', ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        if provider is None:
            logger.info('No onnxruntime provider specified, using CPUExecutionProvider')
            provider = 'CPUExecutionProvider'
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id,
        use_auth_token=None,
        revision=None,
        force_download=False,
        cache_dir=None,
        file_name=None,
        provider=None,
        sess_options=None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs['model_save_dir'] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs['model_save_dir'] = Path(model_cache_path).parent
            kwargs['latest_model_name'] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id,
        force_download=True,
        use_auth_token=None,
        cache_dir=None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split('@')) == 2:
            model_id, revision = model_id.split('@')

        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
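A hypothetical usage sketch; the repo id, file name, and input name below are placeholders, not a real model:

# model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model", file_name="model.onnx")
# outputs = model(input_ids=np.ones((1, 16), dtype=np.int64))
# ORT_TO_NP_TYPE maps session output type strings back to numpy dtypes,
# e.g. ORT_TO_NP_TYPE["tensor(float)"] is np.float32.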
| 29
| 0
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ['prompt']
    batch_params = ['prompt', 'negative_prompt']
    required_optional_params = [
        'num_images_per_prompt',
        'generator',
        'num_inference_steps',
        'latents',
        'negative_prompt',
        'guidance_scale',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 12,
            'embedding_dim': self.text_embedder_hidden_size,
            'num_layers': 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type='fixed_small_log',
            prediction_type='sample',
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            'prior': prior,
            'image_encoder': image_encoder,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'scheduler': scheduler,
            'image_processor': image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs

    def test_kandinsky_prior(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == 'cpu'
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
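As background for the `guidance_scale` input above: the prior applies classifier-free guidance to its predicted image embedding, conceptually along the lines of the following (an illustration, not the pipeline's literal code):

# guided_embeds = negative_embeds + guidance_scale * (prompt_embeds - negative_embeds)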
| 360
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = 'data2vec-text'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='absolute',
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == 'multiple-choice':
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
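A short usage sketch of the two classes above (the small config values are arbitrary):

if __name__ == "__main__":
    config = Data2VecTextConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
    onnx_config = Data2VecTextOnnxConfig(config)
    print(onnx_config.inputs)  # OrderedDict of dynamic axes for input_ids / attention_mask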
| 177
| 0
|
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEquals(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
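The zero-mean/unit-variance assertions above correspond to a simple per-utterance normalization; a sketch of what `do_normalize=True` implies, with an assumed small epsilon for numerical stability:

x = np.asarray(floats_list((1, 100))[0], dtype=np.float32)
x_norm = (x - x.mean()) / np.sqrt(x.var() + 1e-7)
assert abs(x_norm.mean()) < 1e-3 and abs(x_norm.var() - 1) < 1e-3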
| 68
|
def solution():
    """Project Euler 9: product a*b*c of the Pythagorean triplet with a + b + c = 1000."""
    return [
        a * b * (1000 - a - b)
        for a in range(1, 999)
        for b in range(a, 999)
        if (a * a + b * b == (1000 - a - b) ** 2)
    ][0]


if __name__ == "__main__":
    print(f"{solution() = }")
| 248
| 0
|
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
_KWARGS_DESCRIPTION = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1: A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2: The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3: The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4: A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
'''
_CITATION = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=1 , __UpperCAmelCase="binary" , __UpperCAmelCase=None , __UpperCAmelCase="warn" , ) -> str:
_a = recall_score(
__UpperCAmelCase , __UpperCAmelCase , labels=__UpperCAmelCase , pos_label=__UpperCAmelCase , average=__UpperCAmelCase , sample_weight=__UpperCAmelCase , zero_division=__UpperCAmelCase , )
return {"recall": float(__UpperCAmelCase ) if score.size == 1 else score}
| 153
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> Dict:
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' )
_a = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
sd_pipe.set_scheduler('''sample_euler''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe([prompt] , generator=__UpperCAmelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_a = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _UpperCAmelCase ( self ) -> List[str]:
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
_a = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
sd_pipe.set_scheduler('''sample_euler''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe([prompt] , generator=__UpperCAmelCase , guidance_scale=9.0 , num_inference_steps=20 , output_type='''np''' )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_a = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def _UpperCAmelCase ( self ) -> str:
_a = StableDiffusionKDiffusionPipeline.from_pretrained('''stabilityai/stable-diffusion-2-1-base''' )
_a = sd_pipe.to(__UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
sd_pipe.set_scheduler('''sample_dpmpp_2m''' )
_a = '''A painting of a squirrel eating a burger'''
_a = torch.manual_seed(0 )
_a = sd_pipe(
[prompt] , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=15 , output_type='''np''' , use_karras_sigmas=__UpperCAmelCase , )
_a = output.images
_a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_a = np.array(
[0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
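# --- illustrative sketch (editor's addition): the inference pattern the tests above
# exercise, reduced to a plain call. Assumes a CUDA device and the `k-diffusion`
# package that this pipeline wraps; checkpoint and sampler names are taken from the
# tests themselves.
import torch
from diffusers import StableDiffusionKDiffusionPipeline

pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
pipe = pipe.to("cuda")
pipe.set_scheduler("sample_dpmpp_2m")  # any k-diffusion sampler name is accepted here
generator = torch.manual_seed(0)
image = pipe(
    "A painting of a squirrel eating a burger",
    generator=generator,
    guidance_scale=7.5,
    num_inference_steps=15,
).images[0]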
| 153
| 1
|
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def __SCREAMING_SNAKE_CASE ( snake_case_ ):
'''simple docstring'''
_UpperCAmelCase = [
"decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(snake_case_ , snake_case_ )
def __SCREAMING_SNAKE_CASE ( snake_case_ ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = emb.weight.shape
_UpperCAmelCase = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
_UpperCAmelCase = emb.weight.data
return lin_layer
def __SCREAMING_SNAKE_CASE ( snake_case_ ):
'''simple docstring'''
_UpperCAmelCase = torch.load(snake_case_ , map_location="cpu" )
_UpperCAmelCase = Namespace(**checkpoint["cfg"]["model"] )
_UpperCAmelCase = checkpoint["model"]
remove_ignore_keys_(snake_case_ )
_UpperCAmelCase = state_dict["decoder.embed_tokens.weight"].shape[0]
_UpperCAmelCase = {key.replace("decoder" , "model" ): val for key, val in state_dict.items()}
_UpperCAmelCase = XGLMConfig(
vocab_size=snake_case_ , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="gelu" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
_UpperCAmelCase = XGLMForCausalLM(snake_case_ )
_UpperCAmelCase = model.load_state_dict(snake_case_ , strict=snake_case_ )
print(snake_case_ )
_UpperCAmelCase = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
lowercase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
lowercase_ : str = parser.parse_args()
lowercase_ : Dict = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
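# --- illustrative sketch (editor's addition): invoking the conversion script above
# from a shell (the script filename and paths are placeholders):
#
#   python convert_xglm_checkpoint.py /path/to/fairseq/model.pt /path/to/output/xglm-hf
#
# The converted weights can then be reloaded with the standard API:
# XGLMForCausalLM.from_pretrained("/path/to/output/xglm-hf")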
| 133
|
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
snake_case_ : Any = KandinskyVaaPriorPipeline
snake_case_ : List[str] = ["prompt"]
snake_case_ : int = ["prompt", "negative_prompt"]
snake_case_ : Optional[int] = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
snake_case_ : List[Any] = False
@property
def UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
return 32
@property
def UpperCamelCase ( self : Tuple ):
"""simple docstring"""
return 32
@property
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return self.time_input_dim
@property
def UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
return 100
@property
def UpperCamelCase ( self : Any ):
"""simple docstring"""
_UpperCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModelWithProjection(snake_case__ )
@property
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase = {
"num_attention_heads": 2,
"attention_head_dim": 12,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
_UpperCAmelCase = PriorTransformer(**snake_case__ )
# clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't
_UpperCAmelCase = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def UpperCamelCase ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
_UpperCAmelCase = CLIPVisionModelWithProjection(snake_case__ )
return model
@property
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
_UpperCAmelCase = CLIPImageProcessor(
crop_size=224 , do_center_crop=snake_case__ , do_normalize=snake_case__ , do_resize=snake_case__ , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=224 , )
return image_processor
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
_UpperCAmelCase = self.dummy_prior
_UpperCAmelCase = self.dummy_image_encoder
_UpperCAmelCase = self.dummy_text_encoder
_UpperCAmelCase = self.dummy_tokenizer
_UpperCAmelCase = self.dummy_image_processor
_UpperCAmelCase = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1_000 , clip_sample=snake_case__ , clip_sample_range=10.0 , )
_UpperCAmelCase = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
def UpperCamelCase ( self : str , snake_case__ : int , snake_case__ : Optional[Any]=0 ):
"""simple docstring"""
if str(snake_case__ ).startswith("mps" ):
_UpperCAmelCase = torch.manual_seed(snake_case__ )
else:
_UpperCAmelCase = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ )
_UpperCAmelCase = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def UpperCamelCase ( self : List[str] ):
"""simple docstring"""
_UpperCAmelCase = "cpu"
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**snake_case__ )
_UpperCAmelCase = pipe.to(snake_case__ )
pipe.set_progress_bar_config(disable=snake_case__ )
_UpperCAmelCase = pipe(**self.get_dummy_inputs(snake_case__ ) )
_UpperCAmelCase = output.image_embeds
_UpperCAmelCase = pipe(
**self.get_dummy_inputs(snake_case__ ) , return_dict=snake_case__ , )[0]
_UpperCAmelCase = image[0, -10:]
_UpperCAmelCase = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
_UpperCAmelCase = np.array(
[-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def UpperCamelCase ( self : Dict ):
"""simple docstring"""
_UpperCAmelCase = torch_device == "cpu"
_UpperCAmelCase = True
_UpperCAmelCase = False
self._test_inference_batch_single_identical(
test_max_difference=snake_case__ , relax_max_difference=snake_case__ , test_mean_pixel_difference=snake_case__ , )
@skip_mps
def UpperCamelCase ( self : int ):
"""simple docstring"""
_UpperCAmelCase = torch_device == "cpu"
_UpperCAmelCase = False
self._test_attention_slicing_forward_pass(
test_max_difference=snake_case__ , test_mean_pixel_difference=snake_case__ , )
| 133
| 1
|
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=18 , UpperCamelCase_=30 , UpperCamelCase_=400 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=False , ):
lowercase_ :Optional[Any] = size if size is not None else {'''height''': 20, '''width''': 20}
lowercase_ :int = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
lowercase_ :List[str] = parent
lowercase_ :List[Any] = batch_size
lowercase_ :Tuple = num_channels
lowercase_ :str = image_size
lowercase_ :Optional[Any] = min_resolution
lowercase_ :int = max_resolution
lowercase_ :str = do_resize
lowercase_ :List[str] = size
lowercase_ :List[str] = do_center_crop
lowercase_ :Optional[int] = crop_size
lowercase_ :Optional[Any] = do_normalize
lowercase_ :int = image_mean
lowercase_ :Optional[int] = image_std
lowercase_ :Dict = do_reduce_labels
def UpperCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def UpperCamelCase ( ) -> Optional[int]:
'''simple docstring'''
lowercase_ :int = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
lowercase_ :List[str] = Image.open(dataset[0]['''file'''] )
lowercase_ :List[str] = Image.open(dataset[1]['''file'''] )
return image, map
def UpperCamelCase ( ) -> Tuple:
'''simple docstring'''
lowercase_ :Dict = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
lowercase_ :Optional[Any] = Image.open(ds[0]['''file'''] )
lowercase_ :List[str] = Image.open(ds[1]['''file'''] )
lowercase_ :List[Any] = Image.open(ds[2]['''file'''] )
lowercase_ :Any = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class UpperCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
lowercase : Optional[Any] =BeitImageProcessor if is_vision_available() else None
def UpperCamelCase ( self ):
lowercase_ :Dict = BeitImageProcessingTester(self )
@property
def UpperCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self ):
lowercase_ :Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_center_crop''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''center_crop''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) )
self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) )
def UpperCamelCase ( self ):
lowercase_ :List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
self.assertEqual(image_processor.do_reduce_labels , UpperCamelCase_ )
lowercase_ :Dict = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=UpperCamelCase_ )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
self.assertEqual(image_processor.do_reduce_labels , UpperCamelCase_ )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
# Initialize image_processing
lowercase_ :Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase_ :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
lowercase_ :Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase_ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCamelCase ( self ):
# Initialize image_processing
lowercase_ :Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase_ :Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
lowercase_ :Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase_ :List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCamelCase ( self ):
# Initialize image_processing
lowercase_ :Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase_ :List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
lowercase_ :List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
lowercase_ :str = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def UpperCamelCase ( self ):
# Initialize image_processing
lowercase_ :Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase_ :Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
lowercase_ :Tuple = []
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
lowercase_ :Tuple = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched
lowercase_ :Optional[int] = image_processing(UpperCamelCase_ , UpperCamelCase_ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test not batched input (PIL images)
lowercase_ , lowercase_ :List[Any] = prepare_semantic_single_inputs()
lowercase_ :Union[str, Any] = image_processing(UpperCamelCase_ , UpperCamelCase_ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
1,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
# Test batched input (PIL images)
lowercase_ , lowercase_ :Tuple = prepare_semantic_batch_inputs()
lowercase_ :str = image_processing(UpperCamelCase_ , UpperCamelCase_ , return_tensors='''pt''' )
self.assertEqual(
encoding['''pixel_values'''].shape , (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(
encoding['''labels'''].shape , (
2,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
self.assertEqual(encoding['''labels'''].dtype , torch.long )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
def UpperCamelCase ( self ):
# Initialize image_processing
lowercase_ :Tuple = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
lowercase_ , lowercase_ :Tuple = prepare_semantic_single_inputs()
lowercase_ :Dict = image_processing(UpperCamelCase_ , UpperCamelCase_ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 150 )
lowercase_ :Dict = True
lowercase_ :Tuple = image_processing(UpperCamelCase_ , UpperCamelCase_ , return_tensors='''pt''' )
self.assertTrue(encoding['''labels'''].min().item() >= 0 )
self.assertTrue(encoding['''labels'''].max().item() <= 255 )
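# --- illustrative sketch (editor's addition): the processor tested above applied to an
# image/segmentation-map pair. `do_reduce_labels=True` is what maps the background
# class (0) to the 255 ignore index checked in the final test; the blank PIL images
# are stand-ins for real ADE20k samples.
from PIL import Image
from transformers import BeitImageProcessor

processor = BeitImageProcessor(do_reduce_labels=True)
image = Image.new("RGB", (640, 480))
seg_map = Image.new("L", (640, 480))
encoding = processor(image, segmentation_maps=seg_map, return_tensors="pt")
pixel_values, labels = encoding["pixel_values"], encoding["labels"]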
| 252
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class UpperCamelCase :
'''simple docstring'''
lowercase : Any =PegasusConfig
lowercase : Any ={}
lowercase : int ="""gelu"""
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=2 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=40 , UpperCamelCase_=2 , UpperCamelCase_=1 , UpperCamelCase_=0 , ):
lowercase_ :Union[str, Any] = parent
lowercase_ :Tuple = batch_size
lowercase_ :Optional[Any] = seq_length
lowercase_ :Any = is_training
lowercase_ :Optional[int] = use_labels
lowercase_ :Optional[int] = vocab_size
lowercase_ :Optional[Any] = hidden_size
lowercase_ :List[Any] = num_hidden_layers
lowercase_ :Tuple = num_attention_heads
lowercase_ :Optional[Any] = intermediate_size
lowercase_ :List[Any] = hidden_dropout_prob
lowercase_ :Optional[Any] = attention_probs_dropout_prob
lowercase_ :Any = max_position_embeddings
lowercase_ :Any = eos_token_id
lowercase_ :List[str] = pad_token_id
lowercase_ :Optional[Any] = bos_token_id
def UpperCamelCase ( self ):
lowercase_ :Any = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowercase_ :Tuple = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowercase_ :int = tf.concat([input_ids, eos_tensor] , axis=1 )
lowercase_ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ :List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowercase_ :Optional[int] = prepare_pegasus_inputs_dict(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
return config, inputs_dict
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ):
lowercase_ :Optional[Any] = TFPegasusModel(config=UpperCamelCase_ ).get_decoder()
lowercase_ :Tuple = inputs_dict['''input_ids''']
lowercase_ :Optional[int] = input_ids[:1, :]
lowercase_ :Tuple = inputs_dict['''attention_mask'''][:1, :]
lowercase_ :List[str] = inputs_dict['''head_mask''']
lowercase_ :int = 1
# first forward pass
lowercase_ :Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ )
lowercase_ , lowercase_ :Dict = outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids
lowercase_ :List[str] = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase_ :Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and attention mask
lowercase_ :Optional[int] = tf.concat([input_ids, next_tokens] , axis=-1 )
lowercase_ :Optional[int] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowercase_ :Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )[0]
lowercase_ :str = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowercase_ :Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowercase_ :Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
lowercase_ :Any = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCamelCase_ , UpperCamelCase_ , rtol=1E-3 )
def UpperCamelCase ( _a , _a , _a , _a=None , _a=None , _a=None , _a=None , _a=None , ) -> Optional[int]:
'''simple docstring'''
if attention_mask is None:
lowercase_ :Dict = tf.cast(tf.math.not_equal(_a , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowercase_ :Dict = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowercase_ :List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowercase_ :Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowercase_ :Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
lowercase : Tuple =(TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
lowercase : List[str] =(TFPegasusForConditionalGeneration,) if is_tf_available() else ()
lowercase : str =(
{
"""conversational""": TFPegasusForConditionalGeneration,
"""feature-extraction""": TFPegasusModel,
"""summarization""": TFPegasusForConditionalGeneration,
"""text2text-generation""": TFPegasusForConditionalGeneration,
"""translation""": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase : Optional[int] =True
lowercase : List[str] =False
lowercase : Union[str, Any] =False
def UpperCamelCase ( self ):
lowercase_ :Dict = TFPegasusModelTester(self )
lowercase_ :str = ConfigTester(self , config_class=UpperCamelCase_ )
def UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase ( self ):
lowercase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowercase : Tuple =[
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
lowercase : Optional[int] =[
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
lowercase : Optional[Any] ="""google/pegasus-xsum"""
@cached_property
def UpperCamelCase ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def UpperCamelCase ( self ):
lowercase_ :Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def UpperCamelCase ( self , **UpperCamelCase_ ):
lowercase_ :Any = self.translate_src_text(**UpperCamelCase_ )
assert self.expected_text == generated_words
def UpperCamelCase ( self , **UpperCamelCase_ ):
lowercase_ :Dict = self.tokenizer(self.src_text , **UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors='''tf''' )
lowercase_ :int = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCamelCase_ , )
lowercase_ :int = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase_ )
return generated_words
@slow
def UpperCamelCase ( self ):
self._assert_generated_batch_equal_expected()
| 252
| 1
|
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
lowerCamelCase_ = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
lowerCamelCase_ = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
lowerCamelCase_ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCamelCase_ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCamelCase_ = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
lowerCamelCase_ = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCamelCase_ = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
lowerCamelCase_ = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCamelCase_ = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
lowerCamelCase_ = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCamelCase_ = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
lowerCamelCase_ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCamelCase_ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
lowerCamelCase_ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
lowerCamelCase_ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
lowerCamelCase_ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
lowerCamelCase_ = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
lowerCamelCase_ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
lowerCamelCase_ = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
lowerCamelCase_ = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCamelCase_ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
lowerCamelCase_ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
lowerCamelCase_ = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
lowerCamelCase_ = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCamelCase_ = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
lowerCamelCase_ = ''''''
lowerCamelCase_ = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
lowerCamelCase_ = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCamelCase_ = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def __magic_name__ ( __a : Any , __a : Optional[Any] ):
'''simple docstring'''
assert ReadMe.from_string(__a , __a ).to_dict() == expected_dict
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def __magic_name__ ( __a : Tuple , __a : List[str] ):
'''simple docstring'''
with pytest.raises(__a , match=re.escape(expected_error.format(path="""root""" ) ) ):
UpperCamelCase__ = ReadMe.from_string(__a , __a )
readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def __magic_name__ ( __a : str , __a : Optional[Any] ):
'''simple docstring'''
with pytest.raises(__a , match=re.escape(expected_error.format(path="""root""" ) ) ):
ReadMe.from_string(__a , __a )
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def __magic_name__ ( __a : Optional[Any] ):
'''simple docstring'''
ReadMe.from_string(__a , __a , suppress_parsing_errors=__a )
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def __magic_name__ ( __a : List[Any] , __a : Any ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = Path(__a ) / """README.md"""
with open(__a , """w+""" ) as readme_file:
readme_file.write(__a )
UpperCamelCase__ = ReadMe.from_readme(__a , __a ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def __magic_name__ ( __a : List[Any] , __a : int ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = Path(__a ) / """README.md"""
with open(__a , """w+""" ) as readme_file:
readme_file.write(__a )
UpperCamelCase__ = expected_error.format(path=__a )
with pytest.raises(__a , match=re.escape(__a ) ):
UpperCamelCase__ = ReadMe.from_readme(__a , __a )
readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def __magic_name__ ( __a : Any , __a : str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = Path(__a ) / """README.md"""
with open(__a , """w+""" ) as readme_file:
readme_file.write(__a )
UpperCamelCase__ = expected_error.format(path=__a )
with pytest.raises(__a , match=re.escape(__a ) ):
ReadMe.from_readme(__a , __a )
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def __magic_name__ ( __a : Any ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = Path(__a ) / """README.md"""
with open(__a , """w+""" ) as readme_file:
readme_file.write(__a )
ReadMe.from_readme(__a , __a , suppress_parsing_errors=__a )
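# --- illustrative sketch (editor's addition): the validation flow the fixtures above
# exercise, condensed. README_CORRECT, CORRECT_DICT and example_yaml_structure are the
# module-level fixtures defined at the top of this file (all bound to
# `lowerCamelCase_` in this rendering).
readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
readme.validate()                          # raises ValueError listing any issues found
assert readme.to_dict() == CORRECT_DICT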
| 244
|
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    '''simple docstring'''
    a , b = 0, 1
    while True:
        a , b = b, a + b
        yield b


def solution(n: int = 1_000) -> int:
    '''simple docstring'''
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
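# --- worked check (editor's addition): with the generator above yielding
# F(2), F(3), ... under the convention F(1) = F(2) = 1, the first 3-digit Fibonacci
# number is F(12) = 144, so solution(3) == 12. For the default n = 1_000 the function
# returns 4782, the well-known Project Euler problem 25 answer.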
| 244
| 1
|
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pba import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowerCAmelCase: Optional[Any] = '.'
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
lowerCAmelCase: Dict = [
'Assert',
'AssignVariableOp',
'EmptyTensorList',
'MergeV2Checkpoints',
'ReadVariableOp',
'ResourceGather',
'RestoreV2',
'SaveV2',
'ShardedFilename',
'StatefulPartitionedCall',
'StaticRegexFullMatch',
'VarHandleOp',
]
def lowerCamelCase__ ( _A , _A , _A ):
a : Optional[int] = SavedModel()
a : int = []
with open(os.path.join(_A , 'utils' , 'tf_ops' , 'onnx.json' ) ) as f:
a : List[str] = json.load(_A )['opsets']
for i in range(1 , opset + 1 ):
onnx_ops.extend(onnx_opsets[str(_A )] )
with open(_A , 'rb' ) as f:
saved_model.ParseFromString(f.read() )
a : Optional[int] = set()
# Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
for meta_graph in saved_model.meta_graphs:
# Add operations in the graph definition
model_op_names.update(node.op for node in meta_graph.graph_def.node )
# Go through the functions in the graph definition
for func in meta_graph.graph_def.library.function:
# Add operations in each function
model_op_names.update(node.op for node in func.node_def )
# Convert to list, sorted if you want
a : List[str] = sorted(_A )
a : Union[str, Any] = []
for op in model_op_names:
if op not in onnx_ops and op not in INTERNAL_OPS:
incompatible_ops.append(_A )
if strict and len(_A ) > 0:
raise Exception(f"""Found the following incompatible ops for the opset {opset}:\n""" + '\n'.join(incompatible_ops) )
elif len(_A ) > 0:
print(f"""Found the following incompatible ops for the opset {opset}:""" )
print(*_A , sep='\n' )
else:
print(f"""The saved model {saved_model_path} can properly be converted with ONNX.""" )
if __name__ == "__main__":
lowerCAmelCase: Optional[int] = argparse.ArgumentParser()
parser.add_argument('--saved_model_path', help='Path of the saved model to check (the .pb file).')
parser.add_argument(
'--opset', default=1_2, type=int, help='The ONNX opset against which the model has to be tested.'
)
parser.add_argument(
'--framework', choices=['onnx'], default='onnx', help='Frameworks against which to test the saved model.'
)
parser.add_argument(
'--strict', action='store_true', help='Whether make the checking strict (raise errors) or not (raise warnings)'
)
lowerCAmelCase: Any = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
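# --- illustrative sketch (editor's addition): invoking the checker above from a shell
# (the script filename and model path are placeholders):
#
#   python check_tf_ops.py --saved_model_path ./export/saved_model.pb --opset 12 --strict
#
# Without --strict, incompatible ops are printed as a warning instead of raising.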
| 371
|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
lowerCAmelCase: List[str] = 'examples/'
lowerCAmelCase: List[Any] = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
lowerCAmelCase: str = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
lowerCAmelCase: str = 'README.md'
def lowerCamelCase__ ( _A , _A , _A ):
with open(_A , 'r' , encoding='utf-8' , newline='\n' ) as f:
a : Tuple = f.read()
a , a : Tuple = REPLACE_PATTERNS[pattern]
a : Dict = replace.replace('VERSION' , _A )
a : Dict = re_pattern.sub(_A , _A )
with open(_A , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(_A )
def lowerCamelCase__ ( _A ):
for folder, directories, fnames in os.walk(_A ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(_A , _A ) , _A , pattern='examples' )
def lowerCamelCase__ ( _A , _A=False ):
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_A , _A , _A )
if not patch:
update_version_in_examples(_A )
def lowerCamelCase__ ( ):
a : Tuple = '🤗 Transformers currently provides the following architectures'
a : Any = '1. Want to contribute a new model?'
with open(_A , 'r' , encoding='utf-8' , newline='\n' ) as f:
a : Tuple = f.readlines()
# Find the start of the list.
a : Optional[int] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
a : Optional[int] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
a : List[Any] = lines[index].replace(
'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , )
index += 1
with open(_A , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(_A )
def lowerCamelCase__ ( ):
with open(REPLACE_FILES['init'] , 'r' ) as f:
a : Union[str, Any] = f.read()
a : Tuple = REPLACE_PATTERNS['init'][0].search(_A ).groups()[0]
return packaging.version.parse(_A )
def lowerCamelCase__ ( _A=False ):
a : int = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
a : Any = default_version.base_version
elif patch:
a : Dict = f"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
a : Union[str, Any] = f"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
a : List[Any] = input(f"""Which version are you releasing? [{default_version}]""" )
if len(_A ) == 0:
a : Union[str, Any] = default_version
print(f"""Updating version to {version}.""" )
global_version_update(_A , patch=_A )
if not patch:
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
def lowerCamelCase__ ( ):
a : int = get_version()
a : Any = f"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
a : int = current_version.base_version
# Check with the user we got that right.
a : Tuple = input(f"""Which version are we developing now? [{dev_version}]""" )
if len(_A ) == 0:
a : Optional[int] = dev_version
print(f"""Updating version to {version}.""" )
global_version_update(_A )
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
lowerCAmelCase: Tuple = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
lowerCAmelCase: Optional[Any] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
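# --- illustrative sketch (editor's addition): the three ways the release script above
# is invoked, matching its argparse flags (the script path is a placeholder):
#
#   python utils/release.py                 # pre-release: bump files to the release version
#   python utils/release.py --patch         # pre-release for a patch (micro + 1)
#   python utils/release.py --post_release  # post-release: bump to the next .dev0 version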
| 96
| 0
|
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def A (__A : List[str] , __A : Optional[Any]=False ) -> List[Any]:
"""simple docstring"""
try:
UpperCAmelCase_ = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCAmelCase_ = default
else:
# KEY is set, convert it to True or False.
try:
UpperCAmelCase_ = strtobool(__A )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
snake_case_ : Any = parse_flag_from_env("RUN_SLOW", default=False)
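# --- illustrative sketch (editor's addition): the env-flag parser above (rendered as
# `A` in this dump) converts "yes"/"no"-style environment variables into booleans via
# `strtobool`, so slow tests only run when the flag is set, e.g.:
#
#   RUN_SLOW=yes python -m pytest tests/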
def A (__A : Any ) -> Tuple:
"""simple docstring"""
return unittest.skip('''Test was skipped''' )(__A )
def A (__A : int ) -> Dict:
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(__A )
def A (__A : Dict ) -> int:
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(__A )
def A (__A : Dict ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(__A )
def A (__A : Optional[Any] ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(__A )
def A (__A : List[Any] ) -> str:
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(__A )
def A (__A : int ) -> Optional[int]:
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(__A )
def A (__A : Any ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(__A )
def A (__A : List[Any] ) -> List[str]:
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(__A )
def A (__A : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(__A )
def A (__A : List[str] ) -> List[str]:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(__A )
def A (__A : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(__A )
def A (__A : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(__A )
def A (__A : Union[str, Any] ) -> int:
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(__A )
def A (__A : Dict ) -> Optional[int]:
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(__A )
def A (__A : Union[str, Any] ) -> str:
"""simple docstring"""
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(__A )
def require_torch_min_version(test_case=None, version=None):
    """simple docstring"""
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)
def A (__A : Union[str, Any] ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(__A )
def A (__A : Union[str, Any] ) -> str:
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(__A )
def A (__A : Any ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(__A )
snake_case_ : Optional[int] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def A (__A : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(__A )
class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        """simple docstring"""
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        """simple docstring"""
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        """simple docstring"""
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def setUp(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        """simple docstring"""
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    """simple docstring"""
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
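# A usage sketch (requires a launched multi-process run, e.g. via
# `accelerate launch`; the tensor value below is illustrative):
#
#   t = torch.tensor([1.0, 2.0, 3.0])
#   assert are_the_same_tensors(t)  # passes only if every rank holds the same values
#
# Each process contributes its copy through `gather`, and the helper checks all
# gathered slices against the local one.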
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        """simple docstring"""
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    """simple docstring"""
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """simple docstring"""
    if echo:
        print("\nRunning: ", " ".join(cmd))
    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """simple docstring"""
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )
    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    """simple docstring"""
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 51
|
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__A = False
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
lowercase__: Dict = VersatileDiffusionTextToImagePipeline.from_pretrained('''shi-labs/versatile-diffusion''' )
# remove text_unet
pipe.remove_unused_weights()
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: List[str] = '''A painting of a squirrel eating a burger '''
lowercase__: str = torch.manual_seed(0 )
lowercase__: Union[str, Any] = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_UpperCAmelCase )
lowercase__: Optional[Any] = VersatileDiffusionTextToImagePipeline.from_pretrained(_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = generator.manual_seed(0 )
lowercase__: List[str] = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _snake_case ( self ):
lowercase__: Dict = VersatileDiffusionTextToImagePipeline.from_pretrained(
'''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Tuple = '''A painting of a squirrel eating a burger '''
lowercase__: Optional[Any] = torch.manual_seed(0 )
lowercase__: Tuple = pipe(
prompt=_UpperCAmelCase , generator=_UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
lowercase__: Union[str, Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__: Any = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 177
| 0
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
__A : int = pd.read_csv('''sample_data.csv''', header=None)
__A : Dict = df.shape[:1][0]
# If you're using some other dataset input the target column
__A : Dict = df.iloc[:, 1:2]
__A : Optional[Any] = actual_data.values.reshape(len_data, 1)
__A : List[str] = MinMaxScaler().fit_transform(actual_data)
__A : List[Any] = 10
__A : Tuple = 5
__A : Dict = 20
__A : Tuple = len_data - periods * look_back
__A : Optional[int] = actual_data[:division]
__A : Optional[int] = actual_data[division - look_back :]
__A , __A : Dict = [], []
__A , __A : Optional[Any] = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
__A : str = np.array(train_x)
__A : List[Any] = np.array(test_x)
__A : Dict = np.array([list(i.ravel()) for i in train_y])
__A : Optional[int] = np.array([list(i.ravel()) for i in test_y])
__A : Tuple = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss='''mean_squared_error''', optimizer='''adam''')
__A : Union[str, Any] = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
__A : Any = model.predict(x_test)
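    # A follow-up sketch (assumes the fitted scaler is kept in a variable, e.g.
    # `scaler = MinMaxScaler()` / `actual_data = scaler.fit_transform(...)` above):
    # predictions come back in the scaled [0, 1] range, so map them back to the
    # original units before plotting or computing an error metric.
    #
    # pred_original = scaler.inverse_transform(pred.reshape(-1, 1))
    # y_true = scaler.inverse_transform(y_test.reshape(-1, 1))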
| 363
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Union[str, Any] = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Any = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__A : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 323
| 0
|
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """simple docstring"""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features, num_examples=100, seq_shapes=None):
    """simple docstring"""
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data
        dummy_data.append((i, example))
    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """simple docstring"""
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)
    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
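# A usage sketch combining the two helpers (feature spec and path are illustrative):
#
#   features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
#   timed_write = get_duration(generate_example_dataset)
#   seconds = timed_write("/tmp/bench.arrow", features, num_examples=1_000)
#
# `get_duration` discards the wrapped function's return value and reports only
# the elapsed time, which is all these benchmarks need.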
| 153
|
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    """simple docstring"""
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    """simple docstring"""
    return i + 1


@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
def snake_case_ (self ) -> Optional[int]:
UpperCamelCase = {}
UpperCamelCase = []
UpperCamelCase = 1
UpperCamelCase = [1, 2]
UpperCamelCase = {"a": 1, "b": 2}
UpperCamelCase = {"a": [1, 2], "b": [3, 4]}
UpperCamelCase = {"a": {"1": 1}, "b": 2}
UpperCamelCase = {"a": 1, "b": 2, "c": 3, "d": 4}
UpperCamelCase = {}
UpperCamelCase = []
UpperCamelCase = 2
UpperCamelCase = [2, 3]
UpperCamelCase = {"a": 2, "b": 3}
UpperCamelCase = {"a": [2, 3], "b": [4, 5]}
UpperCamelCase = {"a": {"1": 2}, "b": 3}
UpperCamelCase = {"a": 2, "b": 3, "c": 4, "d": 5}
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
self.assertEqual(map_nested(__a , __a ) , __a )
UpperCamelCase = 2
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
self.assertEqual(map_nested(__a , __a , num_proc=__a ) , __a )
UpperCamelCase = {"a": np.eye(2 ), "b": np.zeros(3 ), "c": np.ones(2 )}
UpperCamelCase = {"a": 2, "b": 0, "c": 2}
UpperCamelCase = {
"a": np.eye(2 ).astype(__a ),
"b": np.zeros(3 ).astype(__a ),
"c": np.ones(2 ).astype(__a ),
}
self.assertEqual(map_nested(__a , __a , map_numpy=__a ) , __a )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__a , __a , map_numpy=__a ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(__a , __a , map_numpy=__a , num_proc=__a ) , __a )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(__a , __a , map_numpy=__a , num_proc=__a ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(__a ): # can't pickle a local lambda
map_nested(lambda __a : x + 1 , __a , num_proc=__a )
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    """simple docstring"""
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_temp_seed_pytorch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data" , [{}] )
def test_nested_data_structure_data(input_data):
    """simple docstring"""
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    """simple docstring"""
    output = NestedDataStructure(data).flatten()
    assert output == expected_output


def test_asdict():
    """simple docstring"""
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    """simple docstring"""
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    """simple docstring"""
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    """simple docstring"""
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 153
| 1
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ :Union[str, Any] = logging.get_logger(__name__)
A_ :Tuple = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class XLNetConfig(PretrainedConfig):
"""simple docstring"""
UpperCamelCase__ : Any ="""xlnet"""
UpperCamelCase__ : Tuple =["""mems"""]
UpperCamelCase__ : Any ={
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        """simple docstring"""
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        """simple docstring"""
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
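# A minimal usage sketch (values illustrative):
#
#   config = XLNetConfig(vocab_size=1_000, d_model=64, n_layer=2, n_head=4, d_inner=128)
#   config.max_position_embeddings  # -> -1: XLNet has no fixed sequence-length limit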
| 245
|
import itertools
import math


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))
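# Illustrative sanity checks (standard small cases, not from the source):
#
#   list(itertools.islice(prime_generator(), 6))  # -> [2, 3, 5, 7, 11, 13]
#   solution(6)                                   # -> 13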
if __name__ == "__main__":
print(f"{solution() = }")
| 245
| 1
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
UpperCamelCase : List[Any] = StableDiffusionDiffEditPipeline
UpperCamelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
UpperCamelCase : Union[str, Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
UpperCamelCase : List[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCamelCase : Tuple = frozenset([] )
def __A ( self ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=A , )
lowerCamelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_one=A , )
lowerCamelCase = DDIMInverseScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_zero=A , )
torch.manual_seed(0 )
lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
lowerCamelCase = CLIPTextModel(A )
lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def __A ( self , A , A=0 ) -> str:
'''simple docstring'''
lowerCamelCase = floats_tensor((1, 16, 16) , rng=random.Random(A ) ).to(A )
lowerCamelCase = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(A ) ).to(A )
if str(A ).startswith("""mps""" ):
lowerCamelCase = torch.manual_seed(A )
else:
lowerCamelCase = torch.Generator(device=A ).manual_seed(A )
lowerCamelCase = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def __A ( self , A , A=0 ) -> Any:
'''simple docstring'''
lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A )
lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase = Image.fromarray(np.uinta(A ) ).convert("""RGB""" )
if str(A ).startswith("""mps""" ):
lowerCamelCase = torch.manual_seed(A )
else:
lowerCamelCase = torch.Generator(device=A ).manual_seed(A )
lowerCamelCase = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def __A ( self , A , A=0 ) -> str:
'''simple docstring'''
lowerCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A )
lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase = Image.fromarray(np.uinta(A ) ).convert("""RGB""" )
if str(A ).startswith("""mps""" ):
lowerCamelCase = torch.manual_seed(A )
else:
lowerCamelCase = torch.Generator(device=A ).manual_seed(A )
lowerCamelCase = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def __A ( self ) -> Tuple:
'''simple docstring'''
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(A , A , A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowerCamelCase = self.get_dummy_inputs(A )
lowerCamelCase = pipe(**A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(A )
lowerCamelCase = self.pipeline_class.from_pretrained(A )
pipe_loaded.to(A )
pipe_loaded.set_progress_bar_config(disable=A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(A , A ) is None , F'`{optional_component}` did not stay set to None after loading.' , )
lowerCamelCase = self.get_dummy_inputs(A )
lowerCamelCase = pipe_loaded(**A )[0]
lowerCamelCase = np.abs(output - output_loaded ).max()
self.assertLess(A , 1e-4 )
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = """cpu"""
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase = self.get_dummy_mask_inputs(A )
lowerCamelCase = pipe.generate_mask(**A )
lowerCamelCase = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowerCamelCase = np.array([0] * 9 )
lowerCamelCase = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def __A ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase = """cpu"""
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase = self.get_dummy_inversion_inputs(A )
lowerCamelCase = pipe.invert(**A ).images
lowerCamelCase = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCamelCase = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
lowerCamelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1e-3 )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = """cpu"""
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = {"""beta_start""": 0.00085, """beta_end""": 0.012, """beta_schedule""": """scaled_linear"""}
lowerCamelCase = DPMSolverMultistepScheduler(**A )
lowerCamelCase = DPMSolverMultistepInverseScheduler(**A )
lowerCamelCase = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
lowerCamelCase = self.get_dummy_inversion_inputs(A )
lowerCamelCase = pipe.invert(**A ).images
lowerCamelCase = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCamelCase = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
lowerCamelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1e-3 )
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __A ( cls ) -> Any:
'''simple docstring'''
lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
lowerCamelCase = raw_image.convert("""RGB""" ).resize((7_68, 7_68) )
lowerCamelCase = raw_image
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase = torch.manual_seed(0 )
lowerCamelCase = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa )
lowerCamelCase = DDIMScheduler.from_config(pipe.scheduler.config )
lowerCamelCase = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
lowerCamelCase = """a bowl of fruit"""
lowerCamelCase = """a bowl of pears"""
lowerCamelCase = pipe.generate_mask(
image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , )
lowerCamelCase = pipe.invert(
prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A ).latents
lowerCamelCase = pipe(
prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
lowerCamelCase = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5e-1
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = torch.manual_seed(0 )
lowerCamelCase = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa )
lowerCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
lowerCamelCase = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
lowerCamelCase = """a bowl of fruit"""
lowerCamelCase = """a bowl of pears"""
lowerCamelCase = pipe.generate_mask(
image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , )
lowerCamelCase = pipe.invert(
prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A , num_inference_steps=25 , ).latents
lowerCamelCase = pipe(
prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
lowerCamelCase = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 252
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = 1
lowerCamelCase = 3
lowerCamelCase = (32, 32)
lowerCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(A )
return image
@property
def __A ( self ) -> Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def __A ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def __A ( self ) -> int:
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(A )
@property
def __A ( self ) -> str:
'''simple docstring'''
def extract(*A , **A ):
class __lowercase :
"""simple docstring"""
def __init__( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase = torch.ones([0] )
def __A ( self , A ) -> Union[str, Any]:
'''simple docstring'''
self.pixel_values.to(A )
return self
return Out()
return extract
def __A ( self ) -> int:
'''simple docstring'''
lowerCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.dummy_cond_unet
lowerCamelCase = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase = self.dummy_vae
lowerCamelCase = self.dummy_text_encoder
lowerCamelCase = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
lowerCamelCase = 77
lowerCamelCase = self.dummy_image.to(A )
lowerCamelCase = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowerCamelCase = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = """A painting of a squirrel eating a burger"""
lowerCamelCase = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=A , )
lowerCamelCase = output.images
lowerCamelCase = torch.Generator(device=A ).manual_seed(0 )
lowerCamelCase = alt_pipe(
[prompt] , generator=A , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=A , return_dict=A , )[0]
lowerCamelCase = image[0, -3:, -3:, -1]
lowerCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowerCamelCase = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __A ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase = self.dummy_cond_unet
lowerCamelCase = PNDMScheduler(skip_prk_steps=A )
lowerCamelCase = self.dummy_vae
lowerCamelCase = self.dummy_text_encoder
lowerCamelCase = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
lowerCamelCase = 77
lowerCamelCase = self.dummy_image.to(A )
# put models in fp16
lowerCamelCase = unet.half()
lowerCamelCase = vae.half()
lowerCamelCase = bert.half()
# make sure here that pndm scheduler skips prk
lowerCamelCase = AltDiffusionImgaImgPipeline(
unet=A , scheduler=A , vae=A , text_encoder=A , tokenizer=A , safety_checker=A , feature_extractor=self.dummy_extractor , )
lowerCamelCase = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=A )
lowerCamelCase = alt_pipe.to(A )
alt_pipe.set_progress_bar_config(disable=A )
lowerCamelCase = """A painting of a squirrel eating a burger"""
lowerCamelCase = torch.manual_seed(0 )
lowerCamelCase = alt_pipe(
[prompt] , generator=A , num_inference_steps=2 , output_type="""np""" , image=A , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __A ( self ) -> Tuple:
'''simple docstring'''
lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCamelCase = init_image.resize((7_60, 5_04) )
lowerCamelCase = """BAAI/AltDiffusion"""
lowerCamelCase = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase = """A fantasy landscape, trending on artstation"""
lowerCamelCase = torch.manual_seed(0 )
lowerCamelCase = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type="""np""" , )
lowerCamelCase = output.images[0]
lowerCamelCase = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
lowerCamelCase = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __A ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
lowerCamelCase = init_image.resize((7_68, 5_12) )
lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
lowerCamelCase = """BAAI/AltDiffusion"""
lowerCamelCase = AltDiffusionImgaImgPipeline.from_pretrained(
A , safety_checker=A , )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
pipe.enable_attention_slicing()
lowerCamelCase = """A fantasy landscape, trending on artstation"""
lowerCamelCase = torch.manual_seed(0 )
lowerCamelCase = pipe(
prompt=A , image=A , strength=0.75 , guidance_scale=7.5 , generator=A , output_type="""np""" , )
lowerCamelCase = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
| 252
| 1
|
def normality(moles: float, volume: float, nfactor: float) -> int:
    """Normality = molarity * n-factor = (moles / volume) * nfactor."""
    return round(float(moles / volume) * nfactor)


def pressure_of_gas_system(moles: float, volume: float, temperature: float) -> int:
    """Ideal gas law PV = nRT (R = 0.0821 L*atm/(mol*K)), solved for P."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def volume_of_gas_system(moles: float, pressure: float, temperature: float) -> int:
    """Ideal gas law solved for V."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def temperature_of_gas_system(moles: float, volume: float, pressure: float) -> int:
    """Ideal gas law solved for T."""
    return round(float((pressure * volume) / (0.0821 * moles)))
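# A quick worked example (values chosen for illustration):
# 2 mol of an ideal gas in a 10 L vessel at 300 K exerts
# P = nRT / V = (2 * 0.0821 * 300) / 10 = 4.926 atm, which round() reports as 5:
#
#   pressure_of_gas_system(2, 10, 300)  # -> 5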
if __name__ == "__main__":
import doctest
doctest.testmod()
| 20
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
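# A minimal usage sketch (function and argument names are hypothetical):
#
#   def resize(image, **kwargs):
#       scale = deprecate("scale", "0.30.0", "Use `size` instead.", take_from=kwargs)
#       ...
#
# Passing `take_from=kwargs` pops the retired keyword and emits a FutureWarning;
# any keyword left over afterwards raises a TypeError, mimicking a normal
# unexpected-argument failure.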
| 20
| 1
|
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 290
|
"""simple docstring"""
import math
def perfect_square(num: int) -> bool:
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False
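# Illustrative checks (standard cases, not from the source):
#
#   perfect_square_binary_search(16)  # -> True
#   perfect_square_binary_search(26)  # -> False
#
# The binary-search variant stays exact for large integers, whereas the
# math.sqrt version is exposed to floating-point rounding.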
if __name__ == "__main__":
import doctest
doctest.testmod()
| 96
| 0
|
def count_inversions_bf(arr):
    """simple docstring"""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """simple docstring"""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]
    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)
    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """simple docstring"""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i <= k < len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])
    return r, num_inversion


def main():
    """simple docstring"""
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)
if __name__ == "__main__":
main()
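# Note on the two approaches: the brute-force count is O(n^2) while the
# divide-and-conquer version is O(n log n); an illustrative comparison:
#
#   worst = list(range(2_000, 0, -1))       # strictly decreasing input
#   count_inversions_bf(worst)              # ~2_000 * 1_999 / 2 comparisons
#   count_inversions_recursive(worst)[1]    # same answer in ~n log n merge steps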
| 185
|
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
lowerCAmelCase__ :Tuple = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
lowerCAmelCase__ :Optional[int] = parser.parse_args()
if args.model_type == "bert":
lowerCAmelCase__ :Tuple = BertForMaskedLM.from_pretrained(args.model_name)
lowerCAmelCase__ :Optional[int] = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
lowerCAmelCase__ :Any = model.state_dict()
lowerCAmelCase__ :Dict = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCAmelCase__ :List[Any] = state_dict[f'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
lowerCAmelCase__ :Union[str, Any] = state_dict[f'''{prefix}.embeddings.LayerNorm.{w}''']
lowerCAmelCase__ :str = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
lowerCAmelCase__ :Any = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
lowerCAmelCase__ :List[Any] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
lowerCAmelCase__ :List[str] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
lowerCAmelCase__ :List[Any] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
lowerCAmelCase__ :int = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
lowerCAmelCase__ :List[Any] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
lowerCAmelCase__ :List[Any] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
lowerCAmelCase__ :List[Any] = state_dict[
f'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
lowerCAmelCase__ :Optional[int] = state_dict['''cls.predictions.decoder.weight''']
lowerCAmelCase__ :List[str] = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCAmelCase__ :Any = state_dict[f'''cls.predictions.transform.dense.{w}''']
lowerCAmelCase__ :List[str] = state_dict[f'''cls.predictions.transform.LayerNorm.{w}''']
print(f'''N layers selected for distillation: {std_idx}''')
print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 185
| 1
|
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
lowerCAmelCase_ = RobertaTokenizer
lowerCAmelCase_ = RobertaTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = {'''cls_token''': '''<s>'''}
def _snake_case ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowercase_ : Any = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
lowercase_ : List[str] = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
lowercase_ : List[Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
lowercase_ : Union[str, Any] = {'''unk_token''': '''<unk>'''}
lowercase_ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase_ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__SCREAMING_SNAKE_CASE ) )
def _snake_case ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def _snake_case ( self , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase_ : List[Any] = '''lower newer'''
lowercase_ : Dict = '''lower newer'''
return input_text, output_text
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Optional[int] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowercase_ : List[Any] = '''lower newer'''
lowercase_ : Union[str, Any] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
lowercase_ : Union[str, Any] = tokenizer.tokenize(__SCREAMING_SNAKE_CASE ) # , add_prefix_space=True)
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = tokens + [tokenizer.unk_token]
lowercase_ : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[str] = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=__SCREAMING_SNAKE_CASE ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : List[Any] = self.tokenizer_class.from_pretrained('''roberta-base''' )
lowercase_ : Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE )
lowercase_ : str = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
lowercase_ : str = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
lowercase_ : str = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _snake_case ( self ):
"""simple docstring"""
lowercase_ : Tuple = self.get_tokenizer()
lowercase_ : Optional[Any] = '''Encode this sequence.'''
lowercase_ : Optional[Any] = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
lowercase_ : int = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
lowercase_ : List[str] = tokenizer.encode(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
lowercase_ : Any = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Testing spaces after special tokens
lowercase_ : Any = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(__SCREAMING_SNAKE_CASE , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )} ) # mask token has a left space
lowercase_ : List[Any] = tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE )
lowercase_ : int = '''Encode <mask> sequence'''
lowercase_ : List[Any] = '''Encode <mask>sequence'''
lowercase_ : Tuple = tokenizer.encode(__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = encoded.index(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = tokenizer.encode(__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = encoded.index(__SCREAMING_SNAKE_CASE )
lowercase_ : str = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
pass
def _snake_case ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : Union[str, Any] = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE )
lowercase_ : str = '''A, <mask> AllenNLP sentence.'''
lowercase_ : Optional[int] = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[int] = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE , return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
lowercase_ : Union[str, Any] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
lowercase_ : List[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def _snake_case ( self ):
"""simple docstring"""
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowercase_ : List[str] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowercase_ : List[Any] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''add_prefix_space'''] , __SCREAMING_SNAKE_CASE )
self.assertEqual(post_processor_state['''trim_offsets'''] , __SCREAMING_SNAKE_CASE )
def _snake_case ( self ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase_ : Optional[Any] = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
lowercase_ : int = F'''{text_of_1_token} {text_of_1_token}'''
lowercase_ : Dict = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
lowercase_ : Tuple = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
lowercase_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
lowercase_ : List[str] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ) + 1, len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
lowercase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
lowercase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__SCREAMING_SNAKE_CASE ), len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
lowercase_ : List[str] = F''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowercase_ : str = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
lowercase_ : Optional[Any] = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ) + 1, 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
lowercase_ : Any = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
lowercase_ : str = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
lowercase_ : List[str] = self.rust_tokenizer_class.from_pretrained(
__SCREAMING_SNAKE_CASE , use_fast=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE , trim_offsets=__SCREAMING_SNAKE_CASE )
lowercase_ : Dict = tokenizer_r(__SCREAMING_SNAKE_CASE , return_offsets_mapping=__SCREAMING_SNAKE_CASE , add_special_tokens=__SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__SCREAMING_SNAKE_CASE ), 1 + len(__SCREAMING_SNAKE_CASE ) + 1 + len(__SCREAMING_SNAKE_CASE )) , )
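
# Illustrative appendix (added for clarity, not part of the original tests):
# what the offsets assertions above boil down to for "hello hello". With the
# default trim_offsets=True the separating space is excluded from the second
# token's span; with trim_offsets=False it is folded into that span.
#
#   from transformers import RobertaTokenizerFast
#
#   tok = RobertaTokenizerFast.from_pretrained("roberta-base")
#   enc = tok("hello hello", return_offsets_mapping=True, add_special_tokens=False)
#   print(enc.offset_mapping)  # [(0, 5), (6, 11)]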
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """Output of a decoding method: the decoded image batch."""

    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class VectorQuantizer(nn.Module):
    """Discretization bottleneck of a VQ-VAE, with optional index remapping."""

    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None):
        # make sure sample is on the same device as the parameters and has the same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
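
# Illustrative appendix (added for clarity, not part of the original module):
# a minimal sketch of the reparameterised sampling done by
# DiagonalGaussianDistribution. `parameters` needs an even channel count,
# because it is chunked into mean and log-variance halves along dim=1.
#
#   params = torch.randn(1, 8, 4, 4)            # 4 mean + 4 logvar channels
#   dist = DiagonalGaussianDistribution(params)
#   latent = dist.sample()                      # mean + std * eps
#   kl = dist.kl()                              # KL vs. a standard normal, per sample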
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
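
# Illustrative note (added for clarity, not part of the original file): the
# `_LazyModule` registered above defers the heavy framework imports until an
# attribute is actually accessed, e.g.:
#
#   from transformers.models import xglm
#   config = xglm.XGLMConfig()  # only now is configuration_xglm imported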
from __future__ import annotations

import unittest

from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import numpy
    import tensorflow as tf

    from transformers.models.esm.modeling_tf_esm import (
        TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFEsmForMaskedLM,
        TFEsmForSequenceClassification,
        TFEsmForTokenClassification,
        TFEsmModel,
    )


class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None


@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
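
# Illustrative appendix (added for clarity, not part of the original tests):
# a minimal sketch of turning the masked-LM logits above into a predicted
# residue; the exact protein string is a made-up example.
#
#   import tensorflow as tf
#   from transformers import AutoTokenizer, TFEsmForMaskedLM
#
#   tokenizer = AutoTokenizer.from_pretrained("facebook/esm2_t6_8M_UR50D")
#   model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
#   inputs = tokenizer("MKT<mask>LV", return_tensors="tf")
#   logits = model(**inputs).logits
#   mask_pos = tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0][0]
#   print(tokenizer.decode(tf.argmax(logits[0, mask_pos]).numpy()))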
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale data to the [0, 1] range (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale data to zero mean and unit variance (z-score standardization)."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
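

if __name__ == "__main__":
    # Illustrative usage (added for clarity, not part of the original module):
    # standardized data should come out with mean ~0 and sample stdev ~1.
    sample_data = [2.0, 4.0, 6.0, 8.0]
    print(normalization(sample_data))    # [0.0, 0.333, 0.667, 1.0]
    print(standardization(sample_data))  # [-1.162, -0.387, 0.387, 1.162]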
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a VisualBERT model."""

    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
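
# Illustrative usage (added for clarity, not part of the original file):
# configs serialise to plain dicts/JSON and round-trip losslessly, which is
# how `from_pretrained` restores them from a saved `config.json`.
#
#   config = VisualBertConfig(visual_embedding_dim=1024)
#   restored = VisualBertConfig.from_dict(config.to_dict())
#   assert restored.visual_embedding_dim == 1024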
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def A_ ( self ):
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def A_ ( self ):
torch.manual_seed(0 )
_lowerCamelCase : List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def A_ ( self ):
torch.manual_seed(0 )
_lowerCamelCase : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(lowercase )
def A_ ( self ):
_lowerCamelCase : str = self.dummy_uncond_unet
_lowerCamelCase : Tuple = DDIMScheduler()
_lowerCamelCase : Union[str, Any] = self.dummy_vq_model
_lowerCamelCase : Optional[int] = LDMPipeline(unet=lowercase , vqvae=lowercase , scheduler=lowercase )
ldm.to(lowercase )
ldm.set_progress_bar_config(disable=lowercase )
_lowerCamelCase : List[Any] = torch.manual_seed(0 )
_lowerCamelCase : Any = ldm(generator=lowercase , num_inference_steps=2 , output_type='numpy' ).images
_lowerCamelCase : str = torch.manual_seed(0 )
_lowerCamelCase : int = ldm(generator=lowercase , num_inference_steps=2 , output_type='numpy' , return_dict=lowercase )[0]
_lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
_lowerCamelCase : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCamelCase : List[Any] = np.array([0.85_12, 0.8_18, 0.64_11, 0.68_08, 0.44_65, 0.56_18, 0.46, 0.62_31, 0.51_72] )
_lowerCamelCase : Dict = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
_lowerCamelCase : Tuple = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(lowercase )
ldm.set_progress_bar_config(disable=lowercase )
_lowerCamelCase : int = torch.manual_seed(0 )
_lowerCamelCase : Any = ldm(generator=lowercase , num_inference_steps=5 , output_type='numpy' ).images
_lowerCamelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowerCamelCase : Optional[Any] = np.array([0.43_99, 0.4_49_75, 0.4_68_25, 0.4_74, 0.43_59, 0.45_81, 0.4_50_95, 0.43_41, 0.44_47] )
_lowerCamelCase : Union[str, Any] = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
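
# Illustrative note (added for clarity, not part of the original tests): the
# generator is reseeded before each pipeline call so the dict and tuple code
# paths consume identical noise; without reseeding, the two images would
# differ and the slice comparison would be meaningless.
#
#   generator = torch.manual_seed(0)  # fresh, identically-seeded generator
#   image_again = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images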
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
lowercase__ = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowercase__ = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
lowercase__ = """zero2"""
lowercase__ = """zero3"""
lowercase__ = [ZEROa, ZEROa]
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
_lowerCamelCase : List[str] = parameterized.to_safe_name('_'.join(str(lowercase__ ) for x in param.args ) )
return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
lowercase__ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
def A_ ( self , lowercase ):
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = True , lowercase = True , lowercase = True , ):
_lowerCamelCase : List[str] = models[model]
_lowerCamelCase : Optional[int] = self.run_trainer(
stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , )
self.do_checks(lowercase )
return output_dir
def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = 1 , lowercase = True , lowercase = True , ):
_lowerCamelCase : List[str] = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase )
_lowerCamelCase : Any = F'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(lowercase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_lowerCamelCase : Optional[int] = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
_lowerCamelCase : Optional[Any] = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
_lowerCamelCase : Dict = self.get_launcher(lowercase )
_lowerCamelCase : Union[str, Any] = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase , env=self.get_env() )
return output_dir
def A_ ( self , lowercase=False ):
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with mode gpus because we use very little data)
_lowerCamelCase : Any = min(2 , get_gpu_count() ) if distributed else 1
return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
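
# Illustrative note (added for clarity, not part of the original file): for a
# distributed zero2 run of the "base" model, the assembled command looks
# roughly like the following (paths depend on the checkout layout):
#
#   deepspeed --num_nodes 1 --num_gpus 2 \
#       <examples_dir>/research_projects/wav2vec2/run_asr.py \
#       --model_name_or_path patrickvonplaten/wav2vec2_tiny_random ... --fp16 \
#       --deepspeed <test_dir>/ds_config_wav2vec2_zero2.json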
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
lowercase : Any = logging.get_logger(__name__)
# General docstring
lowercase : Tuple = """RegNetConfig"""
# Base docstring
lowercase : int = """facebook/regnet-y-040"""
lowercase : str = [1, 1088, 7, 7]
# Image classification docstring
lowercase : Tuple = """facebook/regnet-y-040"""
lowercase : Optional[int] = """tabby, tabby cat"""
lowercase : str = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __snake_case ( tf.keras.layers.Layer ):
def __init__( self ,snake_case ,snake_case = 3 ,snake_case = 1 ,snake_case = 1 ,snake_case = "relu" ,**snake_case ,):
'''simple docstring'''
super().__init__(**snake_case )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowercase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowercase : Any = tf.keras.layers.ConvaD(
filters=snake_case ,kernel_size=snake_case ,strides=snake_case ,padding="""VALID""" ,groups=snake_case ,use_bias=snake_case ,name="""convolution""" ,)
lowercase : List[str] = tf.keras.layers.BatchNormalization(epsilon=1e-5 ,momentum=0.9 ,name="""normalization""" )
lowercase : Dict = ACTaFN[activation] if activation is not None else tf.identity
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : Tuple = self.convolution(self.padding(snake_case ) )
lowercase : Any = self.normalization(snake_case )
lowercase : int = self.activation(snake_case )
return hidden_state
class __snake_case ( tf.keras.layers.Layer ):
def __init__( self ,snake_case ,**snake_case ):
'''simple docstring'''
super().__init__(**snake_case )
lowercase : Dict = config.num_channels
lowercase : Any = TFRegNetConvLayer(
out_channels=config.embedding_size ,kernel_size=3 ,stride=2 ,activation=config.hidden_act ,name="""embedder""" ,)
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : List[Any] = shape_list(snake_case )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowercase : str = tf.transpose(snake_case ,perm=(0, 2, 3, 1) )
lowercase : Dict = self.embedder(snake_case )
return hidden_state
class __snake_case ( tf.keras.layers.Layer ):
def __init__( self ,snake_case ,snake_case = 2 ,**snake_case ):
'''simple docstring'''
super().__init__(**snake_case )
lowercase : Dict = tf.keras.layers.ConvaD(
filters=snake_case ,kernel_size=1 ,strides=snake_case ,use_bias=snake_case ,name="""convolution""" )
lowercase : List[str] = tf.keras.layers.BatchNormalization(epsilon=1e-5 ,momentum=0.9 ,name="""normalization""" )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = False ):
'''simple docstring'''
return self.normalization(self.convolution(snake_case ) ,training=snake_case )
class TFRegNetSELayer(tf.keras.layers.Layer):
def __init__( self ,snake_case ,snake_case ,**snake_case ):
'''simple docstring'''
super().__init__(**snake_case )
lowercase : Optional[Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case ,name="""pooler""" )
lowercase : Dict = [
tf.keras.layers.ConvaD(filters=snake_case ,kernel_size=1 ,activation="""relu""" ,name="""attention.0""" ),
tf.keras.layers.ConvaD(filters=snake_case ,kernel_size=1 ,activation="""sigmoid""" ,name="""attention.2""" ),
]
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : int = self.pooler(snake_case )
for layer_module in self.attention:
lowercase : List[Any] = layer_module(snake_case )
lowercase : Union[str, Any] = hidden_state * pooled
return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
def __init__( self ,snake_case ,snake_case ,snake_case ,snake_case = 1 ,**snake_case ):
'''simple docstring'''
super().__init__(**snake_case )
lowercase : List[Any] = in_channels != out_channels or stride != 1
lowercase : Tuple = max(1 ,out_channels // config.groups_width )
lowercase : str = (
TFRegNetShortCut(snake_case ,stride=snake_case ,name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" ,name="""shortcut""" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowercase : List[Any] = [
TFRegNetConvLayer(snake_case ,kernel_size=1 ,activation=config.hidden_act ,name="""layer.0""" ),
TFRegNetConvLayer(
snake_case ,stride=snake_case ,groups=snake_case ,activation=config.hidden_act ,name="""layer.1""" ),
TFRegNetConvLayer(snake_case ,kernel_size=1 ,activation=snake_case ,name="""layer.2""" ),
]
lowercase : Any = ACTaFN[config.hidden_act]
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : List[str] = hidden_state
for layer_module in self.layers:
lowercase : Optional[int] = layer_module(snake_case )
lowercase : Optional[Any] = self.shortcut(snake_case )
hidden_state += residual
lowercase : Dict = self.activation(snake_case )
return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
def __init__( self ,snake_case ,snake_case ,snake_case ,snake_case = 1 ,**snake_case ):
'''simple docstring'''
super().__init__(**snake_case )
lowercase : Optional[Any] = in_channels != out_channels or stride != 1
lowercase : List[Any] = max(1 ,out_channels // config.groups_width )
lowercase : Optional[Any] = (
TFRegNetShortCut(snake_case ,stride=snake_case ,name="""shortcut""" )
if should_apply_shortcut
else tf.keras.layers.Activation("""linear""" ,name="""shortcut""" )
)
lowercase : Optional[int] = [
TFRegNetConvLayer(snake_case ,kernel_size=1 ,activation=config.hidden_act ,name="""layer.0""" ),
TFRegNetConvLayer(
snake_case ,stride=snake_case ,groups=snake_case ,activation=config.hidden_act ,name="""layer.1""" ),
TFRegNetSELayer(snake_case ,reduced_channels=int(round(in_channels / 4 ) ) ,name="""layer.2""" ),
TFRegNetConvLayer(snake_case ,kernel_size=1 ,activation=snake_case ,name="""layer.3""" ),
]
lowercase : List[Any] = ACTaFN[config.hidden_act]
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : List[str] = hidden_state
for layer_module in self.layers:
lowercase : Tuple = layer_module(snake_case )
lowercase : Union[str, Any] = self.shortcut(snake_case )
hidden_state += residual
lowercase : Optional[int] = self.activation(snake_case )
return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
def __init__( self ,snake_case ,snake_case ,snake_case ,snake_case = 2 ,snake_case = 2 ,**snake_case ):
'''simple docstring'''
super().__init__(**snake_case )
lowercase : List[str] = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
lowercase : List[Any] = [
# downsampling is done in the first layer with stride of 2
layer(snake_case ,snake_case ,snake_case ,stride=snake_case ,name="""layers.0""" ),
*[layer(snake_case ,snake_case ,snake_case ,name=f"layers.{i+1}" ) for i in range(depth - 1 )],
]
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
for layer_module in self.layers:
lowercase : Dict = layer_module(snake_case )
return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
def __init__( self ,snake_case ,**snake_case ):
'''simple docstring'''
super().__init__(**snake_case )
lowercase : int = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
snake_case ,config.embedding_size ,config.hidden_sizes[0] ,stride=2 if config.downsample_in_first_stage else 1 ,depth=config.depths[0] ,name="""stages.0""" ,) )
lowercase : str = zip(config.hidden_sizes ,config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(snake_case ,config.depths[1:] ) ):
self.stages.append(TFRegNetStage(snake_case ,snake_case ,snake_case ,depth=snake_case ,name=f"stages.{i+1}" ) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = False ,snake_case = True ):
'''simple docstring'''
lowercase : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase : str = hidden_states + (hidden_state,)
lowercase : Union[str, Any] = stage_module(snake_case )
if output_hidden_states:
lowercase : int = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=snake_case ,hidden_states=snake_case )
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    config_class = RegNetConfig
def __init__( self ,snake_case ,**snake_case ):
'''simple docstring'''
super().__init__(**snake_case )
lowercase : List[str] = config
lowercase : Optional[int] = TFRegNetEmbeddings(snake_case ,name="""embedder""" )
lowercase : List[Any] = TFRegNetEncoder(snake_case ,name="""encoder""" )
lowercase : Union[str, Any] = tf.keras.layers.GlobalAveragePoolingaD(keepdims=snake_case ,name="""pooler""" )
@unpack_inputs
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = None ,snake_case = None ,snake_case = False ,):
'''simple docstring'''
lowercase : Tuple = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase : List[str] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase : List[Any] = self.embedder(snake_case ,training=snake_case )
lowercase : Optional[Any] = self.encoder(
snake_case ,output_hidden_states=snake_case ,return_dict=snake_case ,training=snake_case )
lowercase : Optional[Any] = encoder_outputs[0]
lowercase : Optional[int] = self.pooler(snake_case )
        # Change to NCHW output format to have uniformity in the modules
lowercase : Union[str, Any] = tf.transpose(snake_case ,perm=(0, 3, 1, 2) )
lowercase : Dict = tf.transpose(snake_case ,perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowercase : Any = tuple([tf.transpose(snake_case ,perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=snake_case ,pooler_output=snake_case ,hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states ,)
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) ,dtype=tf.floataa )}
REGNET_START_DOCSTRING = r"""
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
def __init__( self ,snake_case ,*snake_case ,**snake_case ):
'''simple docstring'''
super().__init__(snake_case ,*snake_case ,**snake_case )
lowercase : Dict = TFRegNetMainLayer(snake_case ,name="""regnet""" )
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=snake_case ,config_class=_CONFIG_FOR_DOC ,modality="""vision""" ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = None ,snake_case = None ,snake_case=False ,):
'''simple docstring'''
lowercase : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase : Optional[int] = self.regnet(
pixel_values=snake_case ,output_hidden_states=snake_case ,return_dict=snake_case ,training=snake_case ,)
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,)
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
def __init__( self ,snake_case ,*snake_case ,**snake_case ):
'''simple docstring'''
super().__init__(snake_case ,*snake_case ,**snake_case )
lowercase : Optional[int] = config.num_labels
lowercase : Optional[Any] = TFRegNetMainLayer(snake_case ,name="""regnet""" )
# classification head
lowercase : Union[str, Any] = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels ,name="""classifier.1""" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=snake_case ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def _SCREAMING_SNAKE_CASE ( self ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case=False ,):
'''simple docstring'''
lowercase : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase : Optional[int] = self.regnet(
snake_case ,output_hidden_states=snake_case ,return_dict=snake_case ,training=snake_case )
lowercase : Any = outputs.pooler_output if return_dict else outputs[1]
lowercase : Dict = self.classifier[0](snake_case )
lowercase : Optional[int] = self.classifier[1](snake_case )
lowercase : Dict = None if labels is None else self.hf_compute_loss(labels=snake_case ,logits=snake_case )
if not return_dict:
lowercase : Dict = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=snake_case ,logits=snake_case ,hidden_states=outputs.hidden_states )
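# A minimal usage sketch for the image-classification model defined above. It
# goes through the public `transformers` API and assumes network access to the
# "facebook/regnet-y-040" checkpoint named in the docstrings; the image URL is
# an arbitrary example.
if __name__ == "__main__":
    import requests
    from PIL import Image
    from transformers import AutoImageProcessor, TFRegNetForImageClassification

    processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    inputs = processor(image, return_tensors="tf")
    logits = model(**inputs).logits
    predicted_class = int(tf.math.argmax(logits, axis=-1)[0])
    print(model.config.id2label[predicted_class])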
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).

    """,
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        """Locate the positions of the mask token in `input_ids`."""
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k when targets restrict the candidate set
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        return np.array(target_ids)

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}
        if targets is not None:
            postprocess_params["target_ids"] = self.get_target_ids(targets, top_k)
        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
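# A minimal usage sketch of the pipeline above, driven through the high-level
# `pipeline` factory (assumes network access; "distilroberta-base" is an
# illustrative checkpoint whose mask token is `<mask>`).
if __name__ == "__main__":
    from transformers import pipeline

    unmasker = pipeline("fill-mask", model="distilroberta-base")

    # `top_k` bounds the number of predictions returned by `postprocess`;
    # `targets` restricts scoring to given candidates via `get_target_ids`.
    print(unmasker("The capital of France is <mask>.", top_k=3))
    print(unmasker("The capital of France is <mask>.", targets=[" Paris", " Lyon"]))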
def merge_sort(collection: list) -> list:
    """Pure-Python merge sort; returns the collection sorted in ascending order.

    >>> merge_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> merge_sort([])
    []
    """

    def merge(left: list, right: list) -> list:
        """Merge two sorted lists into a single sorted list."""

        def _merge():
            while left and right:
                # Pop from whichever list currently has the smaller head element.
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right

        return list(_merge())

    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
from __future__ import annotations

import math


def ucal(u: float, p: int) -> float:
    """Return u * (u - 1) * ... * (u - p + 1), the product term of the
    Newton forward-difference formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    # difference table, initialised with zeros
    y: list[list[float]] = [[0.0 for _ in range(n)] for _ in range(n)]
    print("enter the values of parameters in a list: ")
    x = list(map(float, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # calculate the forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
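# A non-interactive variant of the routine above, easier to test; the function
# name and signature are illustrative additions, not part of the original
# script. Like `main`, it assumes equally spaced x values.
def newton_forward_interpolate(x: list[float], y0: list[float], value: float) -> float:
    n = len(x)
    table = [[0.0] * n for _ in range(n)]
    for i in range(n):
        table[i][0] = y0[i]
    # forward difference table
    for i in range(1, n):
        for j in range(n - i):
            table[j][i] = table[j + 1][i - 1] - table[j][i - 1]
    u = (value - x[0]) / (x[1] - x[0])
    result = table[0][0]
    for i in range(1, n):
        result += ucal(u, i) * table[0][i] / math.factorial(i)
    return result


# Quick check: f(x) = x**2 sampled at x = 0..3 is reproduced exactly at 2.5:
#     newton_forward_interpolate([0, 1, 2, 3], [0, 1, 4, 9], 2.5) == 6.25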
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"
class UperNetConvModule(nn.Module):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0 , SCREAMING_SNAKE_CASE_ = False , SCREAMING_SNAKE_CASE_ = 1 , ) -> None:
super().__init__()
__lowerCamelCase : Optional[int] = nn.Convad(
in_channels=SCREAMING_SNAKE_CASE_ , out_channels=SCREAMING_SNAKE_CASE_ , kernel_size=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ , dilation=SCREAMING_SNAKE_CASE_ , )
__lowerCamelCase : List[str] = nn.BatchNormad(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = nn.ReLU()
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> torch.Tensor:
__lowerCamelCase : List[Any] = self.conv(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = self.batch_norm(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = self.activation(SCREAMING_SNAKE_CASE_ )
return output
class UperNetPyramidPoolingBlock(nn.Module):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
super().__init__()
__lowerCamelCase : str = [
nn.AdaptiveAvgPoolad(SCREAMING_SNAKE_CASE_ ),
UperNetConvModule(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> torch.Tensor:
__lowerCamelCase : Union[str, Any] = input
for layer in self.layers:
__lowerCamelCase : str = layer(SCREAMING_SNAKE_CASE_ )
return hidden_state
class UperNetPyramidPoolingModule(nn.Module):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
super().__init__()
__lowerCamelCase : Optional[int] = pool_scales
__lowerCamelCase : List[Any] = align_corners
__lowerCamelCase : str = in_channels
__lowerCamelCase : List[Any] = channels
__lowerCamelCase : int = []
for i, pool_scale in enumerate(SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : str = UperNetPyramidPoolingBlock(pool_scale=SCREAMING_SNAKE_CASE_ , in_channels=SCREAMING_SNAKE_CASE_ , channels=SCREAMING_SNAKE_CASE_ )
self.blocks.append(SCREAMING_SNAKE_CASE_ )
self.add_module(str(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> List[torch.Tensor]:
__lowerCamelCase : Optional[int] = []
for ppm in self.blocks:
__lowerCamelCase : Dict = ppm(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = nn.functional.interpolate(
SCREAMING_SNAKE_CASE_ , size=x.size()[2:] , mode='bilinear' , align_corners=self.align_corners )
ppm_outs.append(SCREAMING_SNAKE_CASE_ )
return ppm_outs
class UperNetHead(nn.Module):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
super().__init__()
__lowerCamelCase : Union[str, Any] = config
__lowerCamelCase : int = config.pool_scales # e.g. (1, 2, 3, 6)
__lowerCamelCase : Tuple = in_channels
__lowerCamelCase : Union[str, Any] = config.hidden_size
__lowerCamelCase : int = False
__lowerCamelCase : Optional[int] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
__lowerCamelCase : List[str] = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
__lowerCamelCase : Optional[Any] = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
__lowerCamelCase : List[Any] = nn.ModuleList()
__lowerCamelCase : Dict = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
__lowerCamelCase : int = UperNetConvModule(SCREAMING_SNAKE_CASE_ , self.channels , kernel_size=1 )
__lowerCamelCase : str = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(SCREAMING_SNAKE_CASE_ )
self.fpn_convs.append(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def lowercase_ ( self ) -> List[Any]:
self.apply(self._init_weights )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
if isinstance(SCREAMING_SNAKE_CASE_ , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
__lowerCamelCase : int = inputs[-1]
__lowerCamelCase : List[Any] = [x]
psp_outs.extend(self.psp_modules(SCREAMING_SNAKE_CASE_ ) )
__lowerCamelCase : str = torch.cat(SCREAMING_SNAKE_CASE_ , dim=1 )
__lowerCamelCase : Tuple = self.bottleneck(SCREAMING_SNAKE_CASE_ )
return output
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> torch.Tensor:
# build laterals
__lowerCamelCase : str = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(SCREAMING_SNAKE_CASE_ ) )
# build top-down path
__lowerCamelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE_ )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
__lowerCamelCase : str = laterals[i - 1].shape[2:]
__lowerCamelCase : str = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=SCREAMING_SNAKE_CASE_ , mode='bilinear' , align_corners=self.align_corners )
# build outputs
__lowerCamelCase : Optional[int] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
__lowerCamelCase : str = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='bilinear' , align_corners=self.align_corners )
__lowerCamelCase : Tuple = torch.cat(SCREAMING_SNAKE_CASE_ , dim=1 )
__lowerCamelCase : Tuple = self.fpn_bottleneck(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[Any] = self.classifier(SCREAMING_SNAKE_CASE_ )
return output
class UperNetFCNHead(nn.Module):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 2 , SCREAMING_SNAKE_CASE_ = 3 , SCREAMING_SNAKE_CASE_ = 1 ) -> None:
super().__init__()
__lowerCamelCase : List[Any] = config
__lowerCamelCase : Dict = config.auxiliary_in_channels
__lowerCamelCase : List[str] = config.auxiliary_channels
__lowerCamelCase : List[Any] = config.auxiliary_num_convs
__lowerCamelCase : List[Any] = config.auxiliary_concat_input
__lowerCamelCase : Any = in_index
__lowerCamelCase : Dict = (kernel_size // 2) * dilation
__lowerCamelCase : Union[str, Any] = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , dilation=SCREAMING_SNAKE_CASE_ ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , dilation=SCREAMING_SNAKE_CASE_ ) )
if self.num_convs == 0:
__lowerCamelCase : Any = nn.Identity()
else:
__lowerCamelCase : Any = nn.Sequential(*SCREAMING_SNAKE_CASE_ )
if self.concat_input:
__lowerCamelCase : Any = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=SCREAMING_SNAKE_CASE_ , padding=kernel_size // 2 )
__lowerCamelCase : Union[str, Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
def lowercase_ ( self ) -> int:
self.apply(self._init_weights )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> str:
if isinstance(SCREAMING_SNAKE_CASE_ , nn.Convad ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> torch.Tensor:
# just take the relevant feature maps
__lowerCamelCase : Optional[int] = encoder_hidden_states[self.in_index]
__lowerCamelCase : int = self.convs(SCREAMING_SNAKE_CASE_ )
if self.concat_input:
__lowerCamelCase : Optional[int] = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
__lowerCamelCase : Optional[Any] = self.classifier(SCREAMING_SNAKE_CASE_ )
return output
class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def lowercase_ ( self ) -> Optional[int]:
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> Optional[int]:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : List[str] = value
UPERNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ ) -> int:
super().__init__(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
__lowerCamelCase : List[Any] = UperNetHead(SCREAMING_SNAKE_CASE_ , in_channels=self.backbone.channels )
__lowerCamelCase : Optional[Any] = UperNetFCNHead(SCREAMING_SNAKE_CASE_ ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('batch_size, sequence_length' ) )
@replace_return_docstrings(output_type=SCREAMING_SNAKE_CASE_ , config_class=_CONFIG_FOR_DOC )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , ) -> Union[tuple, SemanticSegmenterOutput]:
__lowerCamelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
__lowerCamelCase : Optional[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowerCamelCase : Any = output_attentions if output_attentions is not None else self.config.output_attentions
__lowerCamelCase : str = self.backbone.forward_with_filtered_kwargs(
SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ , output_attentions=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = outputs.feature_maps
__lowerCamelCase : Optional[int] = self.decode_head(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = nn.functional.interpolate(SCREAMING_SNAKE_CASE_ , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = None
if self.auxiliary_head is not None:
__lowerCamelCase : str = self.auxiliary_head(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = nn.functional.interpolate(
SCREAMING_SNAKE_CASE_ , size=pixel_values.shape[2:] , mode='bilinear' , align_corners=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('The number of labels should be greater than one' )
else:
# compute weighted loss
__lowerCamelCase : int = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
__lowerCamelCase : Optional[int] = loss_fct(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = loss_fct(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
if not return_dict:
if output_hidden_states:
__lowerCamelCase : Optional[int] = (logits,) + outputs[1:]
else:
__lowerCamelCase : List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=SCREAMING_SNAKE_CASE_ , logits=SCREAMING_SNAKE_CASE_ , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
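# A minimal usage sketch for the segmentation model above. It goes through the
# public `transformers` API and assumes network access to the
# "openmmlab/upernet-convnext-tiny" checkpoint listed at the top of the file;
# the image URL is an arbitrary example.
if __name__ == "__main__":
    import requests
    from PIL import Image
    from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # (batch, num_labels, height, width)
    segmentation_map = logits.argmax(dim=1)[0]  # per-pixel class ids
    print(segmentation_map.shape)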
"""
Resize an image using the nearest-neighbour interpolation algorithm.
"""
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        # white canvas for the resized output
        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        """Fill each destination pixel from its nearest source pixel."""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination x-coordinate back to a source column."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination y-coordinate back to a source row."""
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
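# For comparison, OpenCV ships the same operation as a built-in; the snippet
# below is illustrative only and not part of the original script. It should
# produce an equivalently sized nearest-neighbour result much faster:
#
#     import cv2
#     resized = cv2.resize(im, (dst_w, dst_h), interpolation=cv2.INTER_NEAREST)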
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
def __init__(self , a_ , a_=7 , a_=3 , a_=18 , a_=30 , a_=4_00 , a_=True , a_=None , a_=True , a_=[0.5, 0.5, 0.5] , a_=[0.5, 0.5, 0.5] , ):
'''simple docstring'''
__snake_case : str = size if size is not None else {'''height''': 18, '''width''': 18}
__snake_case : str = parent
__snake_case : Optional[Any] = batch_size
__snake_case : int = num_channels
__snake_case : List[Any] = image_size
__snake_case : Optional[int] = min_resolution
__snake_case : List[Any] = max_resolution
__snake_case : List[str] = do_resize
__snake_case : str = size
__snake_case : Union[str, Any] = do_normalize
__snake_case : Optional[Any] = image_mean
__snake_case : str = image_std
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[str] = DPTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , '''image_mean''' ) )
self.assertTrue(hasattr(a_ , '''image_std''' ) )
self.assertTrue(hasattr(a_ , '''do_normalize''' ) )
self.assertTrue(hasattr(a_ , '''do_resize''' ) )
self.assertTrue(hasattr(a_ , '''size''' ) )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 18} )
__snake_case : int = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__snake_case : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
__snake_case : int = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__snake_case : List[Any] = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__snake_case : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
__snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__snake_case : int = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
__snake_case : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
__snake_case : int = image_processing(a_ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
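# A minimal usage sketch of the processor under test; the 18x18 target size
# mirrors the tester's default, and the random image is an illustrative
# stand-in for real input.
if __name__ == "__main__":
    from PIL import Image
    from transformers import DPTImageProcessor

    image_processor = DPTImageProcessor(size={"height": 18, "width": 18})
    image = Image.fromarray(np.random.randint(0, 256, (30, 40, 3), dtype=np.uint8))

    pixel_values = image_processor(image, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])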
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    },
    "tokenizer_file": {
        "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
        "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on


class MBartTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = MBartTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__(self , a_=None , a_=None , a_="<s>" , a_="</s>" , a_="</s>" , a_="<s>" , a_="<unk>" , a_="<pad>" , a_="<mask>" , a_=None , a_=None , a_=None , **a_ , ):
'''simple docstring'''
__snake_case : Optional[int] = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else mask_token
super().__init__(
vocab_file=a_ , tokenizer_file=a_ , bos_token=a_ , eos_token=a_ , sep_token=a_ , cls_token=a_ , unk_token=a_ , pad_token=a_ , mask_token=a_ , src_lang=a_ , tgt_lang=a_ , additional_special_tokens=a_ , **a_ , )
__snake_case : Tuple = vocab_file
__snake_case : Optional[Any] = False if not self.vocab_file else True
__snake_case : Dict = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__snake_case : Optional[int] = {
lang_code: self.convert_tokens_to_ids(a_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__snake_case : List[Any] = src_lang if src_lang is not None else '''en_XX'''
__snake_case : Any = self.convert_tokens_to_ids(self._src_lang )
__snake_case : Dict = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Tuple = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
__snake_case : Tuple = [self.sep_token_id]
__snake_case : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_ , a_ , **a_ ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__snake_case : Optional[int] = src_lang
__snake_case : Tuple = self(a_ , add_special_tokens=a_ , return_tensors=a_ , **a_ )
__snake_case : Union[str, Any] = self.convert_tokens_to_ids(a_ )
__snake_case : int = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE (self , a_ , a_ = "en_XX" , a_ = None , a_ = "ro_RO" , **a_ , ):
'''simple docstring'''
__snake_case : int = src_lang
__snake_case : List[Any] = tgt_lang
return super().prepare_seqaseq_batch(a_ , a_ , **a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = self.convert_tokens_to_ids(a_ )
__snake_case : List[Any] = []
__snake_case : Any = [self.eos_token_id, self.cur_lang_code]
__snake_case : List[str] = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : Dict = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : Any = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : int = self.convert_tokens_to_ids(a_ )
__snake_case : Optional[Any] = []
__snake_case : Dict = [self.eos_token_id, self.cur_lang_code]
__snake_case : str = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case : Any = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case : Tuple = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(a_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory.""" )
return
__snake_case : Optional[Any] = os.path.join(
a_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file , a_ )
return (out_vocab_file,)
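# A minimal translation-tokenization sketch for the fast tokenizer above. It
# imports the public class from `transformers` and assumes network access to
# the "facebook/mbart-large-en-ro" checkpoint.
if __name__ == "__main__":
    from transformers import MBartTokenizerFast

    tokenizer = MBartTokenizerFast.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
    # With src_lang="en_XX", the post-processor appends [eos, en_XX] as suffix
    # tokens, mirroring set_src_lang_special_tokens above.
    print(tokenizer.convert_ids_to_tokens(inputs["input_ids"][0][-2:]))  # ['</s>', 'en_XX']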
"""Lower-upper (LU) decomposition of a square matrix (Doolittle's method)."""
from __future__ import annotations

import numpy as np


def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Decompose `table` into a unit lower-triangular factor and an
    upper-triangular factor such that table == lower @ upper."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper


if __name__ == "__main__":
    import doctest

    doctest.testmod()
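# A quick sanity check: multiplying the factors back together should
# reconstruct the input. The 3x3 matrix below is an arbitrary example that
# admits an LU decomposition without pivoting.
if __name__ == "__main__":
    matrix = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower, upper = lower_upper_decomposition(matrix)
    # `lower` is unit lower-triangular and `upper` is upper-triangular.
    assert np.allclose(lower @ upper, matrix)
    print(lower)
    print(upper)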
"""Lazy import structure for the Mask2Former model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_mask2former": [
        "MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Mask2FormerConfig",
    ],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_mask2former"] = ["Mask2FormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mask2former"] = [
        "MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Mask2FormerForUniversalSegmentation",
        "Mask2FormerModel",
        "Mask2FormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_mask2former import Mask2FormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
lowercase__ : List[Any] = get_tests_dir("fixtures/dummy-config.json")
class AutoConfigTest(unittest.TestCase):
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
a = 0
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
a = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(A , A )
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
a = AutoConfig.from_pretrained(A )
self.assertIsInstance(A , A )
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
a = AutoConfig.from_pretrained(A )
self.assertIsInstance(A , A )
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
a = AutoConfig.for_model("roberta" )
self.assertIsInstance(A , A )
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
a = os.path.join(A , "fake-roberta" )
os.makedirs(A , exist_ok=A )
with open(os.path.join(A , "config.json" ) , "w" ) as f:
f.write(json.dumps({} ) )
a = AutoConfig.from_pretrained(A )
self.assertEqual(type(A ) , A )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
try:
AutoConfig.register("custom" , A )
# Wrong model type will raise an error
with self.assertRaises(A ):
AutoConfig.register("model" , A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(A ):
AutoConfig.register("bert" , A )
# Now that the config is registered, it can be used as any other config with the auto-API
a = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A )
a = AutoConfig.from_pretrained(A )
self.assertIsInstance(A , A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def lowerCAmelCase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
with self.assertRaisesRegex(
A , "bert-base is not a local folder and is not a valid model identifier" ):
a = AutoConfig.from_pretrained("bert-base" )
def lowerCAmelCase_ ( self ) -> Dict:
'''simple docstring'''
with self.assertRaisesRegex(
A , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
a = AutoConfig.from_pretrained(A , revision="aaaaaa" )
def lowerCAmelCase_ ( self ) -> Tuple:
'''simple docstring'''
with self.assertRaisesRegex(
A , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ):
a = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
with self.assertRaises(A ):
a = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(A ):
a = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=A )
a = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=A )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A )
a = AutoConfig.from_pretrained(A , trust_remote_code=A )
self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" )
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
        class NewModelConfigLocal(BertConfig):
            model_type = "new-model"
try:
AutoConfig.register("new-model" , A )
# If remote code is not set, the default is to use local
a = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
a = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=A )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
a = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=A )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
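
# A minimal usage sketch of the registration pattern exercised above
# (hypothetical `MyConfig` name; any PretrainedConfig subclass with a unique
# `model_type` works):
#
#   class MyConfig(PretrainedConfig):
#       model_type = "my-model"
#
#   AutoConfig.register("my-model", MyConfig)
#   config = AutoConfig.for_model("my-model")  # resolves to MyConfig()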
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'{bindir}/../../examples/pytorch/translation'):
from run_translation import main # noqa
set_seed(42)
lowercase__ : List[str] = "sshleifer/student_marian_en_ro_6_1"
lowercase__ : List[Any] = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )

    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
@parameterized.expand(["base", "low", "high", "mixed"] )
@require_torch_multi_gpu
def lowerCAmelCase_ ( self , A ) -> Dict:
'''simple docstring'''
a = {
# test with the default log_level - should be info and thus log info once
"base": {"extra_args_str": "", "n_matches": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
}
a = experiments[experiment_id]
a = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
a = "Running training"
with CaptureStderr() as cl:
self.run_seqaseq_quick(**A , extra_args_str=data["extra_args_str"] )
a = len(re.findall(A , cl.err ) )
self.assertEqual(A , data["n_matches"] )
    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)
            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb

        # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        expected_savings = 120

        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")

        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
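
# For reference, the distributed branch above effectively shells out to a
# command like the following (values are illustrative, not fixed):
#
#   python -m torch.distributed.run --nproc_per_node=2 --master_port=29500 \
#       examples/pytorch/translation/run_translation.py --model_name_or_path ... --do_train ...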
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device


enable_full_determinism()


class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(
            generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance


@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
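
# Note: diffusers pipelines called with output_type="numpy" return images as
# float32 arrays in NHWC layout with values in [0, 1], which is why the shape
# checks above read (batch, height, width, 3).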
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3

    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=scheduler,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
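
# enable_sequential_cpu_offload moves each submodule to the GPU only while it
# runs, which is why the peak CUDA allocation above stays under ~2.65 GB at
# the cost of extra host/device transfers.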
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)


def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
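
# Perplexity is the exponentiated mean token-level cross-entropy, so a mean
# eval loss of 2.0 corresponds to a perplexity of e**2 ~= 7.39; the
# OverflowError guard above is a safety net for extremely large losses.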
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
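
# Usage sketch (assumes the sample-by-sample IIRFilter.process API from the
# accompanying audio_filters.iir_filter module):
#
#   filt = make_lowpass(1000, 48000)
#   filtered = [filt.process(sample) for sample in samples]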
"""simple docstring"""
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
F'''Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}''', n.output
)
waitKey(0)
destroyAllWindows()
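
# Each destination pixel (i, j) simply copies the nearest source pixel at
# (int(i * src_h / dst_h), int(j * src_w / dst_w)), so the resize runs in
# O(dst_w * dst_h) with no interpolation (hence the blocky look when upscaling).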
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model
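
# Note: make_linear_from_emb ties the LM head to the input embeddings by
# reusing the embedding matrix as a bias-free output projection, mirroring
# fairseq's shared decoder input/output embedding.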
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
__lowerCAmelCase : List[str] = parser.parse_args()
__lowerCAmelCase : str = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class A:
'''simple docstring'''
pass
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
from .autoencoder_kl import AutoencoderKL
from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
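
# These re-exports define the public surface of diffusers.models, letting
# callers write e.g. `from diffusers.models import UNet2DConditionModel`
# without reaching into private submodules; the Flax block is guarded
# separately so torch-only installs still import cleanly.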
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=3_8_4,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=1_2_8,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=2_0,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=3_0,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=4_2, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())
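
# The serialized engine written above is reloaded further down via
# trt.Runtime(TRT_LOGGER).deserialize_cuda_engine(f.read()), which avoids
# rebuilding and re-optimizing the network on every run.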
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
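
# Note the CUDA pipelining pattern in model_infer: host-to-device copies, the
# kernel launch and device-to-host copies are all enqueued asynchronously on
# one stream, and only stream.synchronize() blocks, so the timed window covers
# the full transfer + inference round trip.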
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f" Num examples = {len(eval_dataset)}")
logger.info(f" Batch size = {args.per_device_eval_batch_size}")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_0_0_0))
logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)

    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
A_ : List[InputFeatures]
def __init__(self : int , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = None , a__ : List[Any]=False , a__ : bool = False , ):
"""simple docstring"""
__snake_case = hans_processors[task]()
__snake_case = os.path.join(
a__ , '''cached_{}_{}_{}_{}'''.format(
'''dev''' if evaluate else '''train''' , tokenizer.__class__.__name__ , str(a__ ) , a__ , ) , )
__snake_case = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case = label_list[2], label_list[1]
__snake_case = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
__snake_case = cached_features_file + '''.lock'''
with FileLock(a__ ):
if os.path.exists(a__ ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
__snake_case = torch.load(a__ )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
__snake_case = (
processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ )
)
logger.info('''Training examples: %s''' , len(a__ ) )
__snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ )
logger.info('''Saving features into cached file %s''' , a__ )
torch.save(self.features , a__ )
def __len__(self : int ):
"""simple docstring"""
return len(self.features )
def __getitem__(self : Dict , a__ : List[Any] ):
"""simple docstring"""
return self.features[i]
def a (self : List[Any] ):
"""simple docstring"""
return self.label_list
if is_tf_available():
import tensorflow as tf
class SCREAMING_SNAKE_CASE__ :
A_ : List[InputFeatures]
def __init__(self : Tuple , a__ : str , a__ : PreTrainedTokenizer , a__ : str , a__ : Optional[int] = 128 , a__ : Any=False , a__ : bool = False , ):
"""simple docstring"""
__snake_case = hans_processors[task]()
__snake_case = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
__snake_case , __snake_case = label_list[2], label_list[1]
__snake_case = label_list
__snake_case = processor.get_dev_examples(a__ ) if evaluate else processor.get_train_examples(a__ )
__snake_case = hans_convert_examples_to_features(a__ , a__ , a__ , a__ )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc='''convert examples to features''' ):
if ex_index % 1_0000 == 0:
logger.info('''Writing example %d of %d''' % (ex_index, len(a__ )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
__snake_case = tf.data.Dataset.from_generator(
a__ , (
{
'''example_id''': tf.intaa,
'''input_ids''': tf.intaa,
'''attention_mask''': tf.intaa,
'''token_type_ids''': tf.intaa,
},
tf.intaa,
) , (
{
'''example_id''': tf.TensorShape([] ),
'''input_ids''': tf.TensorShape([None, None] ),
'''attention_mask''': tf.TensorShape([None, None] ),
'''token_type_ids''': tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def a (self : Union[str, Any] ):
"""simple docstring"""
return self.dataset
def __len__(self : Dict ):
"""simple docstring"""
return len(self.features )
def __getitem__(self : Any , a__ : Dict ):
"""simple docstring"""
return self.features[i]
def a (self : str ):
"""simple docstring"""
return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0

        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
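# Illustrative lookup (a sketch of how the registries above are consumed by the
# dataset classes in this module):
#     processor = hans_processors["hans"]()       # -> HansProcessor instance
#     num_labels = hans_tasks_num_labels["hans"]  # -> 3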
"""simple docstring"""
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def _ask_field(input_text, convert_value=None, default=None, error_message=None):
    ask_again = True
    while ask_again:
        result = input(input_text)
        try:
            if default is not None and len(result) == 0:
                return default
            return convert_value(result) if convert_value is not None else result
        except Exception:
            if error_message is not None:
                print(error_message)


def _ask_options(input_text, options=[], convert_value=None, default=0):
    menu = BulletMenu(input_text, options)
    result = menu.run(default_choice=default)
    return convert_value(result) if convert_value is not None else result


def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
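# Illustrative call (a sketch; the prompt text below is hypothetical, not part
# of this module): _ask_field re-prompts until convert_value succeeds, and
# returns the default on empty input.
#
#     num_machines = _ask_field(
#         "How many different machines will you use? [1]: ",
#         convert_value=int,
#         default=1,
#         error_message="Please enter an integer.",
#     )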
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1
    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")
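# Worked example: comparing 2^10 with 3^7 via logs, res(2, 10) = 10*log10(2) ≈ 3.0103
# and res(3, 7) = 7*log10(3) ≈ 3.3398, so 3^7 (= 2187) is larger than 2^10 (= 1024).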
if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
        "artist": "Zac Brown Band",
        "genres": "Country",
        "lyrics": """I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
    }
    @require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
logger = logging.get_logger(__name__)

DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input

        # Backwards compatibility
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]

        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps

        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act
class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
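# Illustrative usage (a sketch): the defaults above mirror microsoft/deberta-v2-xlarge,
# so a bare DebertaV2Config() reproduces that architecture, and the backwards-compat
# branch parses a string such as "c2p|p2c" into ["c2p", "p2c"].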
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def tf_k_means_cluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION

        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES

        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.global_variables_initializer()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS

        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
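# Illustrative usage (a sketch, not part of the original module): cluster a few
# random 2-D points into two groups. Assumes a TF 1.x runtime, since the code
# above relies on tf.Session, tf.placeholder and tf.global_variables_initializer.
if __name__ == "__main__":
    from numpy.random import rand

    sample_vectors = [rand(2) * 10 for _ in range(20)]
    centroids, assignments = tf_k_means_cluster(sample_vectors, 2)
    print("Centroids:", centroids)
    print("Assignments:", assignments)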
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another: completing
        # [1, 2] would ambiguously also be a prefix of [1, 2, 3, 4], so such inputs
        # are rejected.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]

        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
import datasets
from .evaluate import evaluate
_CITATION = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
_KWARGS_DESCRIPTION = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CUAD(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": {
"id": datasets.Value("string" ),
"prediction_text": datasets.features.Sequence(datasets.Value("string" ) ),
},
"references": {
"id": datasets.Value("string" ),
"answers": datasets.features.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
},
} ) , codebase_urls=["https://www.atticusprojectai.org/cuad"] , reference_urls=["https://www.atticusprojectai.org/cuad"] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # The obfuscated source only shows a bare False here; the flag name below is
    # a reconstruction.
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
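        # Worked example with the defaults above: num_patches = (30 // 2) ** 2 = 225,
        # so seq_length = ceil((1 - 0.6) * (225 + 1)) = 91 tokens survive the masking.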
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        pt_inputs_dict["noise"] = pt_noise

        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)

    def test_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
        to get deterministic results."""
    )
    def test_determinism(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
        to get deterministic results."""
    )
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(
        reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
        to get deterministic results."""
    )
    def test_save_load_fast_init_to_base(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
deps = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
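# Illustrative lookup (a sketch): the table maps a bare package name to its
# pinned requirement string, e.g. deps["torch"] -> "torch>=1.4".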
'''simple docstring'''
def harmonic_series(n_term: str) -> list:
    """Build the Harmonic Series as a list of term strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
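# Worked example: harmonic_series("3") -> ['1', '1/2', '1/3'].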
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
'''simple docstring'''
from __future__ import annotations

from collections.abc import Callable
class Heap:
    """A generic Heap class, usable as a min- or max-heap depending on the key function."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items as [item, score] pairs.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        """Returns parent index of given index if it exists, else None."""
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        """Returns left-child index of given index if it exists, else None."""
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        """Returns right-child index of given index if it exists, else None."""
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        """Performs changes required for a swap."""
        # First update the indexes of the items in the index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        """Compares the two items using default comparison."""
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        """
        Returns index of valid parent as per desired ordering among given index and
        both its children.
        """
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        """Fixes the heap in the upward direction."""
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        """Fixes the heap in the downward direction."""
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        """Updates given item's value in the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        """Deletes given item from the heap if present."""
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        """Inserts given item with given value into the heap."""
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        """Returns the top [item, score] pair from the heap if present."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        """
        Returns the top [item, score] pair from the heap and removes it as well,
        if present.
        """
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def test_heap() -> None:
    """Smoke-test entry point; see the illustrative usage sketch below."""
if __name__ == "__main__":
import doctest
doctest.testmod()
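# Illustrative usage (a sketch; the original doctests were stripped from this
# copy): with the default key the Heap above behaves as a max-heap.
#
#     h = Heap()
#     h.insert_item(5, 34)
#     h.insert_item(6, 31)
#     h.insert_item(7, 37)
#     h.get_top()       # -> [7, 37]
#     h.extract_top()   # -> [7, 37]
#     h.extract_top()   # -> [5, 34]
#     h.extract_top()   # -> [6, 31]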
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)
    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)
    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                additional_special_tokens = tokenizer.additional_special_tokens

                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)

                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
    @require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = chr(NEW_TOKEN)

                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)

                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )

                NEW_TOKEN = 0xE007
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0)

                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )
    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])
    # tokenizer has a fixed vocab_size (namely all possible unicode code points)
    def test_add_tokens_tokenizer(self):
        pass

    # CanineTokenizer does not support do_lower_case = True, as CANINE works on raw characters
    def test_added_tokens_do_lower_case(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_np_encode_plus_sent_to_model(self):
        pass

    # CanineModel does not support the get_input_embeddings nor the get_vocab method
    def test_torch_encode_plus_sent_to_model(self):
        pass

    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
| 13
| 1
|
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Recursively compute (base ** exponent) % modulo_value by repeated squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation of base by height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result
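# Doctest-style sanity checks for the helpers above (values verified by hand):
#
#     >>> _modexpt(3, 4, 1000)  # 3**4 % 1000
#     81
#     >>> solution(base=2, height=3, digits=8)  # 2 tetrated 3 times: 2**(2**2)
#     16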
if __name__ == "__main__":
    print(f"{solution() = }")
| 280
|
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str


class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum, sn, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(np_sum, sn, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, s4, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)
    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"iterable_length, num_proc, expected_num_proc" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
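# The parametrization above encodes the dispatch rule under test (a sketch,
# inferred purely from the expected values): multiprocessing kicks in only when
# the iterable has at least `parallel_min_length` (16) items, and the effective
# process count is capped by the number of items.
#
#     def expected_num_proc(iterable_length, num_proc, parallel_min_length=16):
#         if iterable_length < parallel_min_length:
#             return 1
#         return min(num_proc or 1, iterable_length)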
class TempSeedTest(TestCase):
    @require_tf
    def test_temp_seed_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    @require_torch
    def test_temp_seed_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
    def test_temp_seed_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
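# For intuition, `temp_seed` behaves roughly like the sketch below: it
# snapshots the RNG state, seeds it for the duration of the block, and restores
# the snapshot afterwards (a simplified re-implementation, not the actual
# datasets code, which also handles torch and tensorflow states).
#
#     from contextlib import contextmanager
#
#     @contextmanager
#     def _temp_numpy_seed(seed):
#         state = np.random.get_state()
#         np.random.seed(seed)
#         try:
#             yield
#         finally:
#             np.random.set_state(state)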
@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
"data, expected_output" , [
({}, []),
([], []),
("foo", ["foo"]),
(["foo", "bar"], ["foo", "bar"]),
([["foo", "bar"]], ["foo", "bar"]),
([[["foo"], ["bar"]]], ["foo", "bar"]),
([[["foo"], "bar"]], ["foo", "bar"]),
({"a": 1, "b": 2}, [1, 2]),
({"a": [1, 2], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[1, 2]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[[3], [4]]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [[3, 4]]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, 4]}, [1, 2, 3, 4]),
({"a": [[[1], [2]]], "b": [3, [4]]}, [1, 2, 3, 4]),
({"a": {"1": 1}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": 2}, [1, 2]),
({"a": {"1": [1]}, "b": [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 153
| 0
|
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( UpperCAmelCase : float , UpperCAmelCase : float , UpperCAmelCase : float ):
'''simple docstring'''
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def _lowerCAmelCase ( UpperCAmelCase : float , UpperCAmelCase : float , UpperCAmelCase : float , ):
'''simple docstring'''
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def _lowerCAmelCase ( UpperCAmelCase : float , UpperCAmelCase : float , UpperCAmelCase : float , ):
'''simple docstring'''
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return compound_interest(
UpperCAmelCase , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
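# Worked examples for the functions above (values follow directly from the
# formulas; the restored function names are assumptions grounded in the
# `compound_interest` call inside `apr_interest`):
#
#     simple_interest(1000, 0.001, 30)    -> 30.0    (1000 * 0.001 * 30)
#     compound_interest(1000, 0.05, 2)    -> ~102.5  (1000 * (1.05**2 - 1))
#     apr_interest(1000, 0.0365, 1)       -> ~37.2   (daily compounding of 0.01% over 365 days)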
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 157
|
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( UpperCAmelCase : float , UpperCAmelCase : float , UpperCAmelCase : float ):
'''simple docstring'''
if days_between_payments <= 0:
raise ValueError('''days_between_payments must be > 0''' )
if daily_interest_rate < 0:
raise ValueError('''daily_interest_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * daily_interest_rate * days_between_payments
def _lowerCAmelCase ( UpperCAmelCase : float , UpperCAmelCase : float , UpperCAmelCase : float , ):
'''simple docstring'''
if number_of_compounding_periods <= 0:
raise ValueError('''number_of_compounding_periods must be > 0''' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('''nominal_annual_interest_rate_percentage must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def _lowerCAmelCase ( UpperCAmelCase : float , UpperCAmelCase : float , UpperCAmelCase : float , ):
'''simple docstring'''
if number_of_years <= 0:
raise ValueError('''number_of_years must be > 0''' )
if nominal_annual_percentage_rate < 0:
raise ValueError('''nominal_annual_percentage_rate must be >= 0''' )
if principal <= 0:
raise ValueError('''principal must be > 0''' )
return compound_interest(
UpperCAmelCase , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 157
| 1
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    # The boolean flags below (training / inference / fp16 / ...) were
    # unrecoverable placeholders in the source; the values used here are an
    # assumption, restored to match the upstream `transformers` benchmark tests.
    def check_results_dict_not_empty(self, results):
        for batch_size_and_seq_lengths in results.values():
            for batch_size, sequence_length in zip(batch_size_and_seq_lengths["bs"], batch_size_and_seq_lengths["ss"]):
                result = batch_size_and_seq_lengths["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False, only_pretrain_model=True
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], fp16=True, multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, save_to_csv=True, sequence_lengths=[8], batch_sizes=[1], inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"), train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"), inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"), train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"), env_info_csv_file=os.path.join(tmp_dir, "env.csv"), multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID], training=True, inference=True, sequence_lengths=[8], batch_sizes=[1], log_filename=os.path.join(tmp_dir, "log.txt"), log_print=True, trace_memory_line_by_line=True, multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
| 252
|
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention_forwardGenerator_pass = False

    def get_dummy_components(self):
        # Several flag values below were unrecoverable placeholders in the
        # source; they are assumptions restored from the upstream diffusers
        # StableUnCLIP tests.
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components

        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1)

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2")

        # regular denoising components

        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1)

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe("anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 92
| 0
|
"""simple docstring"""
from __future__ import annotations
import math
def a_ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
if depth < 0:
raise ValueError('''Depth cannot be less than 0''' )
if len(_lowercase ) == 0:
raise ValueError('''Scores cannot be empty''' )
if depth == height:
return scores[node_index]
if is_max:
return max(
minimax(depth + 1 , node_index * 2 , _lowercase , _lowercase , _lowercase ) , minimax(depth + 1 , node_index * 2 + 1 , _lowercase , _lowercase , _lowercase ) , )
return min(
minimax(depth + 1 , node_index * 2 , _lowercase , _lowercase , _lowercase ) , minimax(depth + 1 , node_index * 2 + 1 , _lowercase , _lowercase , _lowercase ) , )
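# Worked example (verified by hand): with scores [3, 5, 2, 9] and
# height log2(4) = 2, the maximizer gets max(min(3, 5), min(2, 9)) = 3.
#
#     >>> minimax(0, 0, True, [3, 5, 2, 9], 2)
#     3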
def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 358
|
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
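# Typical usage (a sketch; the checkpoint id is illustrative, and the class
# name `Blip2Processor` is an assumption restored from the upstream
# `transformers` module this file mirrors):
#
#     from PIL import Image
#     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     inputs = processor(images=Image.open("photo.jpg"), text="a photo of", return_tensors="pt")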
| 128
| 0
|
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder used in stable unCLIP,
    to normalize image embeddings before noising and un-normalize them afterwards.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None, torch_dtype: Optional[torch.dtype] = None):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
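# Round-trip sanity check for the normalizer above (a minimal sketch):
# unscale(scale(x)) == ((x - mean) / std) * std + mean == x.
#
#     normalizer = StableUnCLIPImageNormalizer(embedding_dim=4)
#     x = torch.randn(2, 4)
#     assert torch.allclose(normalizer.unscale(normalizer.scale(x)), x, atol=1e-6)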
| 53
|
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)
def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"text": "string"},
{"text": "int32"},
{"text": "float32"},
] , )
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
| 18
| 0
|
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polynomial() -> None:
    """Visualize the degree-4 polynomial regression fit against the raw data."""
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()
    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
    # output should be 132148.43750003
| 14
|
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["vqvae"]

    def __init__(self, vqvae: AutoencoderKL, unet: UNet2DConditionModel, mel: Mel, scheduler: Union[DDIMScheduler, DDPMScheduler]):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        # DDIM converges in far fewer denoising steps than DDPM
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(self, batch_size: int = 1, audio_file: str = None, raw_audio: np.ndarray = None, slice: int = 0, start_step: int = 0, steps: int = None, generator: torch.Generator = None, mask_start_secs: float = 0, mask_end_secs: float = 0, step_generator: torch.Generator = None, eta: float = 0, noise: torch.Tensor = None, encoding: torch.Tensor = None, return_dict=True) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        # Only works with DDIM as this method is deterministic
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t**0.5 + beta_prod_t**0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
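# Note on `slerp`: spherical interpolation blends two noise tensors along the
# great circle between them, which keeps the norm profile of Gaussian noise
# roughly constant along the path; this is why diffusion pipelines prefer it
# over plain linear interpolation. A quick boundary check: alpha=0 returns x0
# (sin((1-0)*theta)/sin(theta) == 1 and sin(0) == 0) and alpha=1 returns x1.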
| 14
| 1
|
from collections.abc import Callable
class Heap:
    """A generic heap with a position map, supporting update and deletion of items."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key: Callable = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> list | None:
        """Return the top item, or None if the heap is empty."""
        return self.arr[0] if self.size else None

    def extract_top(self) -> list | None:
        """Return and remove the top item, or None if the heap is empty."""
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def A_ ( ):
pass
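# A short usage sketch for the Heap above. With the default key it behaves as a
# max-heap on the score (pass key=lambda x: -x for min-heap ordering); the
# item/score values here are illustrative only.
def demo_heap() -> None:
    h = Heap()
    h.insert_item(5, 34)
    h.insert_item(6, 31)
    h.insert_item(7, 37)
    print(h.get_top())      # [7, 37] -- largest score wins under the default key
    print(h.extract_top())  # removes and returns [7, 37]
    h.update_item(5, 40)    # re-scores item 5 and restores the heap property
    print(h.get_top())      # [5, 40]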
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
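# A minimal sketch of driving a DisjunctiveConstraint outside the test harness:
# feed generated token ids one at a time and stop once any of the disjunctive
# branches is fully matched. The token ids here are illustrative.
def demo_disjunctive_constraint() -> None:
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token_id in (1, 2, 4):
        stepped, completed, reset = dc.update(token_id)
        print(token_id, stepped, completed, reset, dc.current_seq)
    assert dc.completed  # [1, 2, 4] satisfies the second branch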
| 13
| 1
|
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
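# A quick illustration of the mapping performed by `rename_key` on one
# representative original X-CLIP key (an illustrative input, not read from a
# checkpoint):
#
#     rename_key("transformer.resblocks.0.ln_1.weight")
#     # -> "text_model.encoder.layers.0.layer_norm1.weight"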
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[
                            :dim, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                            dim : dim * 2, :
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[
                            -dim:, :
                        ]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                            dim : dim * 2
                        ]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[
                        dim : dim * 2, :
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[
                        dim : dim * 2
                    ]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
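# A minimal, self-contained sketch of the q/k/v split used above: a fused
# `in_proj` matrix of shape (3 * dim, dim) is cut into three (dim, dim)
# projections. `dim = 4` is an illustrative size, not a model dimension.
def _demo_qkv_split() -> None:
    dim = 4
    in_proj_weight = torch.randn(3 * dim, dim)
    q_w = in_proj_weight[:dim, :]
    k_w = in_proj_weight[dim : dim * 2, :]
    v_w = in_proj_weight[-dim:, :]
    assert q_w.shape == k_w.shape == v_w.shape == (dim, dim)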
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    image_size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=image_size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
# kinetics-400
if model_name == "xclip-base-patch32":
lowerCAmelCase_ = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] )
elif model_name == "xclip-base-patch32-16-frames":
lowerCAmelCase_ = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
elif model_name == "xclip-base-patch16":
lowerCAmelCase_ = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] )
elif model_name == "xclip-base-patch16-16-frames":
lowerCAmelCase_ = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
elif model_name == "xclip-large-patch14":
lowerCAmelCase_ = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] )
elif model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase_ = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
lowerCAmelCase_ = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
lowerCAmelCase_ = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
lowerCAmelCase_ = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
lowerCAmelCase_ = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
lowerCAmelCase_ = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
lowerCAmelCase_ = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
lowerCAmelCase_ = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
lowerCAmelCase_ = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
lowerCAmelCase_ = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
lowerCAmelCase_ = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(a_ , a_ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(a_ )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(a_ , organization='nielsr' )
processor.push_to_hub(a_ , organization='nielsr' )
slow_tokenizer.push_to_hub(a_ , organization='nielsr' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 14
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
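# A worked example for the helper above (illustrative numbers): resizing a
# 480x640 image toward a 384x384 target with keep_aspect_ratio=True and
# multiple=32 picks the scale closest to 1 (384/480 = 0.8, from the height),
# applies it to both sides, then rounds each side to a multiple of 32:
#
#     >>> get_resize_output_image_size(
#     ...     np.zeros((3, 480, 640)), output_size=(384, 384), keep_aspect_ratio=True, multiple=32
#     ... )
#     (384, 512)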
class DPTImageProcessor(BaseImageProcessor):
    r"""
    Constructs a DPT image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 14
| 1
|
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def _lowerCamelCase ( self: List[Any] ) -> Optional[Any]:
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def _lowerCamelCase ( self: Optional[Any] ) -> List[str]:
pass
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])

        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 157
|
def sum_of_digits(n: int) -> int:
    """Find the sum of digits of a number using a loop."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Find the sum of digits of a number using recursion."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Find the sum of digits of a number via its string representation."""
    return sum(int(c) for c in str(abs(n)))
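# Quick sanity checks for the three implementations (illustrative values):
#
#     >>> sum_of_digits(12345)
#     15
#     >>> sum_of_digits_recursion(-12345)
#     15
#     >>> sum_of_digits_compact(0)
#     0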
def benchmark() -> None:
    """Benchmark all three functions against three different-length int values."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 126765060022822940149670320537216):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 157
| 1
|
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 35
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
| 35
| 1
|
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
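# A minimal usage sketch (not part of the library module): combine two
# criteria in a StoppingCriteriaList and query them against a dummy batch of
# token ids. The tensor sizes are illustrative.
def _demo_stopping_criteria() -> None:
    criteria = StoppingCriteriaList(
        [MaxLengthCriteria(max_length=10), MaxTimeCriteria(max_time=60.0)]
    )
    input_ids = torch.ones((1, 10), dtype=torch.long)
    scores = torch.zeros((1, 100))
    # True: the sequence already has 10 tokens, so MaxLengthCriteria fires.
    print(criteria(input_ids, scores))
    print(criteria.max_length)  # 10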
| 178
|
deps = {
"""Pillow""": """Pillow<10.0.0""",
"""accelerate""": """accelerate>=0.20.3""",
"""av""": """av==9.2.0""",
"""beautifulsoup4""": """beautifulsoup4""",
"""black""": """black~=23.1""",
"""codecarbon""": """codecarbon==1.2.0""",
"""cookiecutter""": """cookiecutter==1.7.3""",
"""dataclasses""": """dataclasses""",
"""datasets""": """datasets!=2.5.0""",
"""decord""": """decord==0.6.0""",
"""deepspeed""": """deepspeed>=0.9.3""",
"""diffusers""": """diffusers""",
"""dill""": """dill<0.3.5""",
"""evaluate""": """evaluate>=0.2.0""",
"""fairscale""": """fairscale>0.3""",
"""faiss-cpu""": """faiss-cpu""",
"""fastapi""": """fastapi""",
"""filelock""": """filelock""",
"""flax""": """flax>=0.4.1,<=0.7.0""",
"""ftfy""": """ftfy""",
"""fugashi""": """fugashi>=1.0""",
"""GitPython""": """GitPython<3.1.19""",
"""hf-doc-builder""": """hf-doc-builder>=0.3.0""",
"""huggingface-hub""": """huggingface-hub>=0.14.1,<1.0""",
"""importlib_metadata""": """importlib_metadata""",
"""ipadic""": """ipadic>=1.0.0,<2.0""",
"""isort""": """isort>=5.5.4""",
"""jax""": """jax>=0.2.8,!=0.3.2,<=0.4.13""",
"""jaxlib""": """jaxlib>=0.1.65,<=0.4.13""",
"""jieba""": """jieba""",
"""kenlm""": """kenlm""",
"""keras-nlp""": """keras-nlp>=0.3.1""",
"""librosa""": """librosa""",
"""nltk""": """nltk""",
"""natten""": """natten>=0.14.6""",
"""numpy""": """numpy>=1.17""",
"""onnxconverter-common""": """onnxconverter-common""",
"""onnxruntime-tools""": """onnxruntime-tools>=1.4.2""",
"""onnxruntime""": """onnxruntime>=1.4.0""",
"""opencv-python""": """opencv-python""",
"""optuna""": """optuna""",
"""optax""": """optax>=0.0.8,<=0.1.4""",
"""packaging""": """packaging>=20.0""",
"""parameterized""": """parameterized""",
"""phonemizer""": """phonemizer""",
"""protobuf""": """protobuf""",
"""psutil""": """psutil""",
"""pyyaml""": """pyyaml>=5.1""",
"""pydantic""": """pydantic<2""",
"""pytest""": """pytest>=7.2.0""",
"""pytest-timeout""": """pytest-timeout""",
"""pytest-xdist""": """pytest-xdist""",
"""python""": """python>=3.8.0""",
"""ray[tune]""": """ray[tune]""",
"""regex""": """regex!=2019.12.17""",
"""requests""": """requests""",
"""rhoknp""": """rhoknp>=1.1.0,<1.3.1""",
"""rjieba""": """rjieba""",
"""rouge-score""": """rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1""",
"""ruff""": """ruff>=0.0.241,<=0.0.259""",
"""sacrebleu""": """sacrebleu>=1.4.12,<2.0.0""",
"""sacremoses""": """sacremoses""",
"""safetensors""": """safetensors>=0.3.1""",
"""sagemaker""": """sagemaker>=2.31.0""",
"""scikit-learn""": """scikit-learn""",
"""sentencepiece""": """sentencepiece>=0.1.91,!=0.1.92""",
"""sigopt""": """sigopt""",
"""starlette""": """starlette""",
"""sudachipy""": """sudachipy>=0.6.6""",
"""sudachidict_core""": """sudachidict_core>=20220729""",
"""tensorflow-cpu""": """tensorflow-cpu>=2.6,<2.14""",
"""tensorflow""": """tensorflow>=2.6,<2.14""",
"""tensorflow-text""": """tensorflow-text<2.14""",
"""tf2onnx""": """tf2onnx""",
"""timeout-decorator""": """timeout-decorator""",
"""timm""": """timm""",
"""tokenizers""": """tokenizers>=0.11.1,!=0.11.3,<0.14""",
"""torch""": """torch>=1.9,!=1.12.0""",
"""torchaudio""": """torchaudio""",
"""torchvision""": """torchvision""",
"""pyctcdecode""": """pyctcdecode>=0.4.0""",
"""tqdm""": """tqdm>=4.27""",
"""unidic""": """unidic>=1.0.2""",
"""unidic_lite""": """unidic_lite>=1.0.7""",
"""urllib3""": """urllib3<2.0.0""",
"""uvicorn""": """uvicorn""",
}
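# This table maps a package name to its pinned requirement string, mirroring
# transformers' dependency version table. A lookup sketch (illustrative):
#
#     >>> deps["torch"]
#     'torch>=1.9,!=1.12.0'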
| 128
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[Any] = ["""VisionEncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Optional[int] = ["""TFVisionEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[str] = ["""FlaxVisionEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
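# A minimal sketch of the lazy-import idea behind _LazyModule, expressed with
# PEP 562's module-level __getattr__ (a simplified stand-in, not the actual
# _LazyModule implementation):
#
#     import importlib
#
#     _lazy_attrs = {"VisionEncoderDecoderModel": ".modeling_vision_encoder_decoder"}
#
#     def __getattr__(name):
#         if name in _lazy_attrs:
#             module = importlib.import_module(_lazy_attrs[name], __name__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")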
| 225
|
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def to_atuple(x):
    if isinstance(x, collections.abc.Iterable):
        return x
    return (x, x)
@require_tf
class TFVisionTextDualEncoderMixin:
"""simple docstring"""
def __lowercase ( self , lowercase , lowercase) -> Union[str, Any]:
'''simple docstring'''
pass
def __lowercase ( self) -> Dict:
'''simple docstring'''
pass
def __lowercase ( self) -> Dict:
'''simple docstring'''
pass
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase) -> List[Any]:
'''simple docstring'''
a__ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase , lowercase)
a__ : Any = TFVisionTextDualEncoderModel(lowercase)
a__ : Dict = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase)
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim))
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim))
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase) -> List[Any]:
'''simple docstring'''
a__ , a__ : List[Any] = self.get_vision_text_model(lowercase , lowercase)
a__ : List[str] = TFVisionTextDualEncoderModel(vision_model=lowercase , text_model=lowercase)
a__ : Dict = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase)
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim))
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase) -> Tuple:
'''simple docstring'''
a__ , a__ : Any = self.get_vision_text_model(lowercase , lowercase)
a__ : Tuple = {'vision_model': vision_model, 'text_model': text_model}
a__ : Union[str, Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase)
a__ : Any = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase)
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim))
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim))
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase) -> Optional[Any]:
'''simple docstring'''
a__ , a__ : int = self.get_vision_text_model(lowercase , lowercase)
a__ : List[Any] = TFVisionTextDualEncoderModel(vision_model=lowercase , text_model=lowercase)
a__ : Optional[Any] = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase)
a__ : int = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase)
a__ : str = TFVisionTextDualEncoderModel.from_pretrained(lowercase)
a__ : List[str] = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase)
a__ : str = after_output[0].numpy()
a__ : str = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(lowercase , 1e-5)
def __lowercase ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase) -> Optional[int]:
'''simple docstring'''
a__ , a__ : Optional[Any] = self.get_vision_text_model(lowercase , lowercase)
a__ : Dict = TFVisionTextDualEncoderModel(vision_model=lowercase , text_model=lowercase)
a__ : Optional[int] = model(
input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase , output_attentions=lowercase)
a__ : List[str] = output.vision_model_output.attentions
self.assertEqual(len(lowercase) , vision_config.num_hidden_layers)
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
a__ : Optional[Any] = to_atuple(vision_model.config.image_size)
a__ : Dict = to_atuple(vision_model.config.patch_size)
a__ : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
a__ : List[str] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len))
a__ : Union[str, Any] = output.text_model_output.attentions
self.assertEqual(len(lowercase) , text_config.num_hidden_layers)
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __lowercase ( self , lowercase , lowercase , lowercase) -> Any:
'''simple docstring'''
a__ : str = np.abs((a - b)).max()
self.assertLessEqual(lowercase , lowercase , F'Difference between torch and flax is {diff} (>= {tol}).')
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : List[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**lowercase)
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ : List[Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowercase)
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : Any = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowercase)
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
a__ : int = self.prepare_config_and_inputs()
self.check_save_load(**lowercase)
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
a__ : List[str] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowercase)
@slow
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ , a__ : Union[str, Any] = self.get_pretrained_model_and_inputs()
a__ : Optional[int] = model_a(**lowercase)
a__ : int = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowercase)
a__ : Union[str, Any] = TFVisionTextDualEncoderModel.from_pretrained(lowercase)
a__ : int = model_a(**lowercase)
a__ : str = after_outputs[0].numpy()
a__ : List[Any] = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(lowercase , 1e-5)
@require_tf
class TFViTBertModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
"""simple docstring"""
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFViTModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model
    def prepare_config_and_inputs(self):
        vit_model_tester = TFViTModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values, _ = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class TFDeiTRobertaModelTest(TFVisionTextDualEncoderMixin, unittest.TestCase):
"""simple docstring"""
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-deit-tf", "hf-internal-testing/tiny-random-roberta"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def check_vision_text_output_attention(
        self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs
    ):
        vision_model, text_model = self.get_vision_text_model(vision_config, text_config)
        model = TFVisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model)

        output = model(
            input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True
        )

        vision_attentions = output.vision_model_output.attentions
        self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers)

        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        image_size = to_atuple(vision_model.config.image_size)
        patch_size = to_atuple(vision_model.config.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        seq_len = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len))

        text_attentions = output.text_model_output.attentions
        self.assertEqual(len(text_attentions), text_config.num_hidden_layers)

        self.assertEqual(
            text_attentions[0].shape[-3:],
            (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]),
        )

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFDeiTModel(vision_config, name="vision_model")
        text_model = TFRobertaModel(text_config, name="text_model")
        return vision_model, text_model
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : Optional[int] = TFDeiTModelTester(self)
a__ : str = TFRobertaModelTester(self)
a__ : str = vit_model_tester.prepare_config_and_inputs()
a__ : Dict = bert_model_tester.prepare_config_and_inputs()
a__ , a__ , a__ : Any = vision_config_and_inputs
(
(
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) , (
a__
) ,
) : Tuple = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class A__ ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
    def get_pretrained_model_and_inputs(self):
        model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            "Rocketknight1/tiny-random-clip-tf", "hf-internal-testing/tiny-random-bert"
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}
        return model, inputs

    def get_vision_text_model(self, vision_config, text_config):
        vision_model = TFCLIPVisionModel(vision_config, name="vision_model")
        text_model = TFBertModel(text_config, name="text_model")
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = TFCLIPVisionModelTester(self)
        bert_model_tester = TFBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs

        (
            text_config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = text_config_and_inputs

        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class A__ ( unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_inference(self):
        model = TFVisionTextDualEncoderModel.from_pretrained(
            "clip-italian/clip-italian", logit_scale_init_value=1.0, from_pt=True
        )
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape,
            (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image.numpy(), expected_logits, atol=1e-3))
| 225
| 1
|
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polynomial() -> None:
    """Plot the degree-4 polynomial fit against the raw data points."""
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Polynomial Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()


if __name__ == "__main__":
    viz_polynomial()

    # Predicting a new result with Polynomial Regression
    pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
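
# A small follow-up sketch (an addition, not part of the original script): the
# train/test split created above is otherwise unused, so fit a model on the
# training rows and score it against the held-out rows. Uses only names defined above.
if __name__ == "__main__":
    from sklearn.metrics import r2_score

    test_model = LinearRegression().fit(poly_reg.transform(X_train), y_train)
    print("Held-out R^2:", r2_score(y_test, test_model.predict(poly_reg.transform(X_test))))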
| 14
|
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> "list[int]":
"""simple docstring"""
if upper_limit < 0:
raise ValueError('''Limit for the Catalan sequence must be ≥ 0''' )
A__ = [0] * (upper_limit + 1)
# Base case: C(0) = C(1) = 1
A__ = 1
if upper_limit > 0:
A__ = 1
# Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
for i in range(2 , upper_limit + 1 ):
for j in range(lowercase_ ):
catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]
return catalan_list
if __name__ == "__main__":
print("""\n********* Catalan Numbers Using Dynamic Programming ************\n""")
print("""\n*** Enter -1 at any time to quit ***""")
print("""\nEnter the upper limit (≥ 0) for the Catalan number sequence: """, end="""""")
try:
while True:
_lowerCamelCase : List[Any] = int(input().strip())
if N < 0:
print("""\n********* Goodbye!! ************""")
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print("""Try another upper limit for the sequence: """, end="""""")
except (NameError, ValueError):
print("""\n********* Invalid input, goodbye! ************\n""")
import doctest
doctest.testmod()
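
# Cross-check sketch (an addition, not in the original script): the DP values can
# be verified against the closed form C(n) = comb(2n, n) // (n + 1).
if __name__ == "__main__":
    from math import comb

    assert all(
        c == comb(2 * i, i) // (i + 1) for i, c in enumerate(catalan_numbers(10))
    ), "DP Catalan numbers disagree with the closed form"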
| 14
| 1
|
"""simple docstring"""
def lowercase ( A_ = 1_000_000 )-> int:
'''simple docstring'''
a : List[Any] = set(range(3 , A_ , 2 ) )
primes.add(2 )
for p in range(3 , A_ , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , A_ , A_ ) ) )
a : str = [float(A_ ) for n in range(limit + 1 )]
for p in primes:
for n in range(A_ , limit + 1 , A_ ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
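
# Worked check (an addition): the problem statement notes that there are 21
# reduced proper fractions for d <= 8, and the totient sum reproduces that.
assert solution(8) == 21
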
if __name__ == "__main__":
print(f'''{solution() = }''')
| 353
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
__lowercase = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 226
| 0
|
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12

        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
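
# Quick illustration (an addition): two representative renames performed above.
assert rename_key("ln_final.weight") == "text_model.final_layer_norm.weight"
assert rename_key("visual.proj") == "visual_projection.weight"
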
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak the original checkpoint's weights into our X-CLIP structure.
    """
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
_lowerCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
_lowerCamelCase : List[str] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 14
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--pruning_method""",
choices=["""l0""", """magnitude""", """topK""", """sigmoied_threshold"""],
type=str,
required=True,
help=(
"""Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"""
""" sigmoied_threshold = Soft movement pruning)"""
),
)
parser.add_argument(
"""--threshold""",
type=float,
required=False,
help=(
"""For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."""
"""For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."""
"""Not needed for `l0`"""
),
)
parser.add_argument(
"""--model_name_or_path""",
type=str,
required=True,
help="""Folder containing the model that was previously fine-pruned""",
)
parser.add_argument(
"""--target_model_path""",
default=None,
type=str,
required=False,
help="""Folder containing the model that was previously fine-pruned""",
)
_lowerCamelCase : int = parser.parse_args()
main(args)
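

# Minimal illustration (an addition): the top-k selection that TopKBinarizer
# implements, sketched directly -- keep the weights whose importance scores fall
# in the top `keep_fraction` of all scores. `keep_fraction` is an assumed
# simplification of the `--threshold` argument above.
def topk_mask_sketch(scores, keep_fraction):
    k = max(1, int(scores.numel() * keep_fraction))
    cutoff = scores.flatten().kthvalue(scores.numel() - k + 1).values
    return (scores >= cutoff).to(scores.dtype)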
| 14
| 1
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
_lowerCAmelCase : Union[str, Any] = logging.getLogger(__name__)
class RagPyTorchDistributedRetriever(RagRetriever):
    """
    A distributed retriever built on top of ``torch.distributed``. Retrieval is
    performed on the main worker; the other workers send it their query vectors
    and receive the retrieved documents back via gather/scatter.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int) -> Tuple[np.ndarray, List[dict]]:
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
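

# Standalone sketch (an addition, not part of the retriever): the gather/scatter
# round trip above in miniature. Rank 0 gathers one tensor per rank, transforms
# them, and scatters the results back over a gloo group; run under torchrun.
def _gather_scatter_demo() -> torch.Tensor:
    dist.init_process_group("gloo")
    rank, world_size = dist.get_rank(), dist.get_world_size()
    local = torch.full((2,), float(rank))
    gathered = [torch.empty(2) for _ in range(world_size)] if rank == 0 else None
    dist.gather(local, gather_list=gathered, dst=0)
    scatter_list = [t + 1 for t in gathered] if rank == 0 else None
    out = torch.empty(2)
    dist.scatter(out, scatter_list=scatter_list, src=0)
    return out  # every rank receives its own tensor incremented by rank 0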
| 70
|
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel

if is_flax_available():
    from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
    from .vae_flax import FlaxAutoencoderKL
| 70
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
__a = TypeVar("T")
__a = TypeVar("U")
class DoubleLinkedListNode(Generic[T, U]):
    """
    Double Linked List Node built specifically for LRU Cache
    """

    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """
    Double Linked List built specifically for LRU Cache
    """

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """
    LRU Cache to store a given capacity of data
    """

    # class variable to map the decorator functions to their respective instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        # Note: pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(first_node) is not None
                )  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(
        cls, size: int = 128
    ) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010

            return cache_decorator_wrapper

        return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
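

# Usage sketch (an addition): memoize a recursive function with the decorator above.
@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num in (1, 2):
        return 1
    return fib(num - 1) + fib(num - 2)


if __name__ == "__main__":
    print(fib(30))  # 832040
    print(fib.cache_info())  # the LRUCache instance tracking hits and misses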
| 35
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
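
# Minimal illustration (an addition, a simplified assumption about the mechanism,
# not transformers' real _LazyModule): a module subclass whose attribute access
# imports the submodule that defines the requested name on first use.
import importlib as _importlib
import types as _types


class _LazyModuleSketch(_types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        module = _importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)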
| 35
| 1
|
from ...configuration_utils import PretrainedConfig


NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21_128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
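

# Usage sketch (an addition; assumes the surrounding transformers package is
# importable): instantiate a smaller variant and serialize it.
if __name__ == "__main__":
    config = NezhaConfig(hidden_size=384, num_hidden_layers=6)
    print(config.to_json_string())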
| 315
|
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split dataset into features and target.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
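

def rmse(y_true: np.ndarray, y_pred: np.ndarray) -> float:
    # Helper added here (an addition): RMSE reads more naturally than MSE since
    # it is in the target's units; it is just the square root of the MSE above.
    return float(np.sqrt(mean_squared_error(y_true, y_pred)))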
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 315
| 1
|
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
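

# Quick check (an addition): the 6th prime is 13, as in the problem statement.
assert solution(6) == 13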
if __name__ == "__main__":
print(f'''{solution() = }''')
| 225
|
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
@unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
@slow
def lowerCAmelCase_ ( self : List[str] ):
SCREAMING_SNAKE_CASE_ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-7b-hf' , device_map='auto' )
SCREAMING_SNAKE_CASE_ = model(torch.tensor([input_ids] ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]] )
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
@slow
def lowerCAmelCase_ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE_ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-hf' , device_map='auto' )
SCREAMING_SNAKE_CASE_ = model(torch.tensor(_lowerCAmelCase ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]] )
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Logits are not exactly the same, once we fix the instabalities somehow, will update!' )
@slow
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-13b-chat-hf' , device_map='auto' )
SCREAMING_SNAKE_CASE_ = model(torch.tensor(_lowerCAmelCase ) )
# Expected mean on dim = -1
SCREAMING_SNAKE_CASE_ = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]] )
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 )
# slicing logits[0, 0, 0:30]
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513] )
# fmt: on
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 )
@unittest.skip(
'Logits are not exactly the same, once we fix the instabalities somehow, will update! Also it is gonna be a `too_slow` test' )
@slow
def lowerCAmelCase_ ( self : int ):
SCREAMING_SNAKE_CASE_ = [1, 306, 4_658, 278, 6_593, 310, 2_834, 338]
SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained('meta-llama/Llama-2-70b-hf' , device_map='auto' )
SCREAMING_SNAKE_CASE_ = model(torch.tensor(_lowerCAmelCase ) )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]] , dtype=torch.floataa )
torch.testing.assert_close(out.mean(-1 ) , _lowerCAmelCase , atol=1E-2 , rtol=1E-2 )
# fmt: off
SCREAMING_SNAKE_CASE_ = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312] )
# fmt: on
torch.testing.assert_close(out[0, 0, :30] , _lowerCAmelCase , atol=1E-5 , rtol=1E-5 )
@unittest.skip('Model is currently gated' )
@slow
def lowerCAmelCase_ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE_ = 'Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi'
SCREAMING_SNAKE_CASE_ = 'Simply put, the theory of relativity states that '
SCREAMING_SNAKE_CASE_ = LlamaTokenizer.from_pretrained('meta-llama/Llama-2-13b-chat-hf' )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(_lowerCAmelCase , return_tensors='pt' )
SCREAMING_SNAKE_CASE_ = LlamaForCausalLM.from_pretrained(
'meta-llama/Llama-2-13b-chat-hf' , device_map='sequential' , use_safetensors=_lowerCAmelCase )
# greedy generation outputs
SCREAMING_SNAKE_CASE_ = model.generate(_lowerCAmelCase , max_new_tokens=64 , top_p=_lowerCAmelCase , temperature=1 , do_sample=_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = tokenizer.decode(generated_ids[0] , skip_special_tokens=_lowerCAmelCase )
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase )
| 225
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
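# Lazy-import scaffolding: only the configuration and processing symbols are declared
# eagerly; the torch-dependent modeling classes are resolved on first attribute access
# through the `_LazyModule` instance created at the bottom of this file.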
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 358
|
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class _lowerCamelCase :
def __init__(self ) -> None:
UpperCamelCase = [2, 1, 2, -1]
UpperCamelCase = [1, 2, 3, 4]
def snake_case_ (self ) -> list[float]:
UpperCamelCase = len(self.first_signal )
UpperCamelCase = len(self.second_signal )
UpperCamelCase = max(__a , __a )
# create a zero matrix of max_length x max_length
UpperCamelCase = [[0] * max_length for i in range(__a )]
# fills the smaller signal with zeros to make both signals of same length
if length_first_signal < length_second_signal:
self.first_signal += [0] * (max_length - length_first_signal)
elif length_first_signal > length_second_signal:
self.second_signal += [0] * (max_length - length_second_signal)
for i in range(__a ):
UpperCamelCase = deque(self.second_signal )
rotated_signal.rotate(__a )
for j, item in enumerate(__a ):
matrix[i][j] += item
# multiply the matrix with the first signal
UpperCamelCase = np.matmul(np.transpose(__a ) , np.transpose(self.first_signal ) )
# rounding-off to two decimal places
return [round(__a , 2 ) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
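# A hypothetical usage sketch (the names below are the obfuscated ones from this file,
# and the internal bindings would need to resolve as in the original source for it to
# run): with the default signals [2, 1, 2, -1] and [1, 2, 3, 4], the circular
# convolution evaluates to [10.0, 10.0, 6.0, 14.0].
#     convolution = _lowerCamelCase()
#     print(convolution.snake_case_())  # -> [10.0, 10.0, 6.0, 14.0]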
| 244
| 0
|
"""simple docstring"""
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : str = logging.get_logger(__name__)
class lowerCAmelCase_ (__UpperCamelCase ):
"""simple docstring"""
__UpperCamelCase : List[str] = ['''input_ids''', '''attention_mask''']
def __init__(self , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__=1_25 , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ , ) -> Union[str, Any]:
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [F'''<extra_id_{i}>''' for i in range(a_ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
SCREAMING_SNAKE_CASE__ : Optional[Any] = len(set(filter(lambda SCREAMING_SNAKE_CASE__ : bool("""extra_id""" in str(a_ ) ) , a_ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
""" provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"""
""" extra_ids tokens""" )
SCREAMING_SNAKE_CASE__ : str = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else pad_token
SCREAMING_SNAKE_CASE__ : str = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else eos_token
SCREAMING_SNAKE_CASE__ : int = AddedToken(a_ , lstrip=a_ , rstrip=a_ ) if isinstance(a_ , a_ ) else unk_token
super().__init__(
eos_token=a_ , unk_token=a_ , pad_token=a_ , extra_ids=a_ , additional_special_tokens=a_ , **a_ , )
SCREAMING_SNAKE_CASE__ : Dict = extra_ids
SCREAMING_SNAKE_CASE__ : Any = 2**8 # UTF-8 code units are single bytes, so the byte vocabulary has 2**8 entries
# define special tokens dict
SCREAMING_SNAKE_CASE__ : Dict[int, str] = {
self.pad_token: 0,
self.eos_token: 1,
self.unk_token: 2,
}
SCREAMING_SNAKE_CASE__ : Optional[int] = len(self.special_tokens_encoder )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = len(a_ )
for i, token in enumerate(a_ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.vocab_size + i - n
SCREAMING_SNAKE_CASE__ : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}
@property
def __magic_name__ (self ) -> Union[str, Any]:
"""simple docstring"""
return self._utf_vocab_size + self._num_special_tokens + self._extra_ids
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False ) -> Any:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_ , token_ids_a=a_ , already_has_special_tokens=a_ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(a_ )) + [1]
return ([0] * len(a_ )) + [1] + ([0] * len(a_ )) + [1]
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
if len(a_ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'''This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'''
""" eos tokens being added.""" )
return token_ids
else:
return token_ids + [self.eos_token_id]
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._add_eos_if_not_present(a_ )
if token_ids_a is None:
return token_ids_a
else:
SCREAMING_SNAKE_CASE__ : List[str] = self._add_eos_if_not_present(a_ )
return token_ids_a + token_ids_a
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = [chr(a_ ) for i in text.encode("""utf-8""" )]
return tokens
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> str:
"""simple docstring"""
if token in self.special_tokens_encoder:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.special_tokens_encoder[token]
elif token in self.added_tokens_encoder:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.added_tokens_encoder[token]
elif len(a_ ) != 1:
SCREAMING_SNAKE_CASE__ : int = self.unk_token_id
else:
SCREAMING_SNAKE_CASE__ : List[str] = ord(a_ ) + self._num_special_tokens
return token_id
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]:
"""simple docstring"""
if index in self.special_tokens_decoder:
SCREAMING_SNAKE_CASE__ : Dict = self.special_tokens_decoder[index]
else:
SCREAMING_SNAKE_CASE__ : List[Any] = chr(index - self._num_special_tokens )
return token
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = b''''''
for token in tokens:
if token in self.special_tokens_decoder:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.added_tokens_decoder:
SCREAMING_SNAKE_CASE__ : List[str] = self.special_tokens_decoder[token].encode("""utf-8""" )
elif token in self.special_tokens_encoder:
SCREAMING_SNAKE_CASE__ : Tuple = token.encode("""utf-8""" )
elif token in self.added_tokens_encoder:
SCREAMING_SNAKE_CASE__ : int = token.encode("""utf-8""" )
else:
SCREAMING_SNAKE_CASE__ : Any = bytes([ord(a_ )] )
bstring += tok_string
SCREAMING_SNAKE_CASE__ : Dict = bstring.decode("""utf-8""" , errors="""ignore""" )
return string
def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ) -> Union[str, Any]:
"""simple docstring"""
return ()
| 25
|
def a ( _UpperCAmelCase : Any ):
'''simple docstring'''
__UpperCAmelCase : Any = 0
__UpperCAmelCase : str = len(_UpperCAmelCase )
for i in range(n - 1 ):
for j in range(i + 1 , _UpperCAmelCase ):
if arr[i] > arr[j]:
num_inversions += 1
return num_inversions
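# The brute-force count above is O(n^2). The divide-and-conquer version below reaches
# O(n log n) by counting cross inversions while merging the two sorted halves.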
def a ( _UpperCAmelCase : str ):
'''simple docstring'''
if len(_UpperCAmelCase ) <= 1:
return arr, 0
__UpperCAmelCase : Dict = len(_UpperCAmelCase ) // 2
__UpperCAmelCase : Union[str, Any] = arr[0:mid]
__UpperCAmelCase : Optional[Any] = arr[mid:]
__UpperCAmelCase , __UpperCAmelCase : Tuple = count_inversions_recursive(_UpperCAmelCase )
__UpperCAmelCase , __UpperCAmelCase : Optional[Any] = count_inversions_recursive(_UpperCAmelCase )
__UpperCAmelCase , __UpperCAmelCase : int = _count_cross_inversions(_UpperCAmelCase , _UpperCAmelCase )
__UpperCAmelCase : List[str] = inversion_p + inversions_q + cross_inversions
return c, num_inversions
def a ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Any ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = []
__UpperCAmelCase : Any = 0
while i < len(_UpperCAmelCase ) and j < len(_UpperCAmelCase ):
if p[i] > q[j]:
# if P[i] > Q[j], then P[k] > Q[j] for all i <= k < len(P)
# These are all inversions. The claim emerges from the
# property that P is sorted.
num_inversion += len(_UpperCAmelCase ) - i
r.append(q[j] )
j += 1
else:
r.append(p[i] )
i += 1
if i < len(_UpperCAmelCase ):
r.extend(p[i:] )
else:
r.extend(q[j:] )
return r, num_inversion
def a ( ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = [10, 2, 1, 5, 5, 2, 11]
# this arr has 8 inversions:
# (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
__UpperCAmelCase : List[str] = count_inversions_bf(_UpperCAmelCase )
__UpperCAmelCase , __UpperCAmelCase : Tuple = count_inversions_recursive(_UpperCAmelCase )
assert num_inversions_bf == num_inversions_recursive == 8
print('''number of inversions = ''' , _UpperCAmelCase )
# testing an array with zero inversion (a sorted arr_1)
arr_a.sort()
__UpperCAmelCase : Any = count_inversions_bf(_UpperCAmelCase )
__UpperCAmelCase , __UpperCAmelCase : int = count_inversions_recursive(_UpperCAmelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , _UpperCAmelCase )
# an empty list should also have zero inversions
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : Union[str, Any] = count_inversions_bf(_UpperCAmelCase )
__UpperCAmelCase , __UpperCAmelCase : int = count_inversions_recursive(_UpperCAmelCase )
assert num_inversions_bf == num_inversions_recursive == 0
print('''number of inversions = ''' , _UpperCAmelCase )
if __name__ == "__main__":
main()
| 226
| 0
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
_lowerCamelCase = logging.get_logger(__name__)
_lowerCamelCase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_lowerCamelCase = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
_lowerCamelCase = {
'allenai/led-base-16384': 1_63_84,
}
class a ( _A ):
'''simple docstring'''
lowerCAmelCase : Tuple = VOCAB_FILES_NAMES
lowerCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : Optional[int] = LEDTokenizer
lowerCAmelCase : List[str] = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any] , __snake_case : List[str]=None , __snake_case : Union[str, Any]=None , __snake_case : int=None , __snake_case : Dict="replace" , __snake_case : Tuple="<s>" , __snake_case : Optional[Any]="</s>" , __snake_case : Optional[int]="</s>" , __snake_case : int="<s>" , __snake_case : Any="<unk>" , __snake_case : Union[str, Any]="<pad>" , __snake_case : Optional[Any]="<mask>" , __snake_case : Optional[int]=False , __snake_case : Optional[Any]=True , **__snake_case : List[str] , ):
super().__init__(
__snake_case , __snake_case , tokenizer_file=__snake_case , errors=__snake_case , bos_token=__snake_case , eos_token=__snake_case , sep_token=__snake_case , cls_token=__snake_case , unk_token=__snake_case , pad_token=__snake_case , mask_token=__snake_case , add_prefix_space=__snake_case , trim_offsets=__snake_case , **__snake_case , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __snake_case ) != add_prefix_space:
UpperCAmelCase_ = getattr(__snake_case , pre_tok_state.pop('''type''' ) )
UpperCAmelCase_ = add_prefix_space
UpperCAmelCase_ = pre_tok_class(**__snake_case )
UpperCAmelCase_ = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
UpperCAmelCase_ = '''post_processor'''
UpperCAmelCase_ = getattr(self.backend_tokenizer , __snake_case , __snake_case )
if tokenizer_component_instance:
UpperCAmelCase_ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
UpperCAmelCase_ = tuple(state['''sep'''] )
if "cls" in state:
UpperCAmelCase_ = tuple(state['''cls'''] )
UpperCAmelCase_ = False
if state.get('''add_prefix_space''' , __snake_case ) != add_prefix_space:
UpperCAmelCase_ = add_prefix_space
UpperCAmelCase_ = True
if state.get('''trim_offsets''' , __snake_case ) != trim_offsets:
UpperCAmelCase_ = trim_offsets
UpperCAmelCase_ = True
if changes_to_apply:
UpperCAmelCase_ = getattr(__snake_case , state.pop('''type''' ) )
UpperCAmelCase_ = component_class(**__snake_case )
setattr(self.backend_tokenizer , __snake_case , __snake_case )
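# The block above rewrites the serialized `post_processor` component in place so that
# `add_prefix_space` and `trim_offsets` passed at construction time take effect on the
# backend (Rust) tokenizer as well.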
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCamelCase_ ( self : Tuple ):
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self : int , __snake_case : Any ):
UpperCAmelCase_ = AddedToken(__snake_case , lstrip=__snake_case , rstrip=__snake_case ) if isinstance(__snake_case , __snake_case ) else value
UpperCAmelCase_ = value
def lowerCamelCase_ ( self : Tuple , *__snake_case : Union[str, Any] , **__snake_case : int ):
UpperCAmelCase_ = kwargs.get('''is_split_into_words''' , __snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*__snake_case , **__snake_case )
def lowerCamelCase_ ( self : Union[str, Any] , *__snake_case : Optional[Any] , **__snake_case : Optional[Any] ):
UpperCAmelCase_ = kwargs.get('''is_split_into_words''' , __snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*__snake_case , **__snake_case )
def lowerCamelCase_ ( self : Dict , __snake_case : str , __snake_case : Optional[str] = None ):
UpperCAmelCase_ = self._tokenizer.model.save(__snake_case , name=__snake_case )
return tuple(__snake_case )
def lowerCamelCase_ ( self : str , __snake_case : Any , __snake_case : str=None ):
UpperCAmelCase_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self : Tuple , __snake_case : List[int] , __snake_case : Optional[List[int]] = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self : Optional[int] , __snake_case : Union[Dict[str, EncodedInput], BatchEncoding] , __snake_case : Optional[int] = None , __snake_case : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __snake_case : Optional[int] = None , __snake_case : Optional[bool] = None , ):
UpperCAmelCase_ = super()._pad(
encoded_inputs=__snake_case , max_length=__snake_case , padding_strategy=__snake_case , pad_to_multiple_of=__snake_case , return_attention_mask=__snake_case , )
# Load from model defaults
if return_attention_mask is None:
UpperCAmelCase_ = '''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
UpperCAmelCase_ = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
UpperCAmelCase_ = len(encoded_inputs['''global_attention_mask'''] ) != len(__snake_case )
if needs_to_be_padded:
UpperCAmelCase_ = len(__snake_case ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
UpperCAmelCase_ = (
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
UpperCAmelCase_ = [-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 177
|
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase_ = len(__UpperCamelCase )
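# Pancake sort: on each pass, flip the maximum of the unsorted prefix to the front,
# then flip the whole prefix so that the maximum lands in its final position.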
while cur > 1:
# Find the maximum number in arr
UpperCAmelCase_ = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
UpperCAmelCase_ = arr[mi::-1] + arr[mi + 1 : len(__UpperCamelCase )]
# Reverse whole list
UpperCAmelCase_ = arr[cur - 1 :: -1] + arr[cur : len(__UpperCamelCase )]
cur -= 1
return arr
if __name__ == "__main__":
_lowerCamelCase = input('Enter numbers separated by a comma:\n').strip()
_lowerCamelCase = [int(item) for item in user_input.split(',')]
print(pancake_sort(unsorted))
| 177
| 1
|
'''simple docstring'''
import sys
from collections import defaultdict
class UpperCAmelCase :
def __init__( self : int ) -> Optional[Any]:
_lowerCAmelCase = []
def lowercase__ ( self : List[str] , __snake_case : Dict ) -> List[Any]:
return self.node_position[vertex]
def lowercase__ ( self : List[Any] , __snake_case : int , __snake_case : Optional[Any] ) -> List[Any]:
_lowerCAmelCase = pos
def lowercase__ ( self : Optional[int] , __snake_case : Tuple , __snake_case : List[Any] , __snake_case : List[Any] , __snake_case : Optional[Any] ) -> Dict:
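# Sift-down: push the value at `start` toward the leaves until the min-heap property
# holds for the first `size` elements.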
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
_lowerCAmelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
_lowerCAmelCase = 2 * start + 1
else:
_lowerCAmelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
_lowerCAmelCase , _lowerCAmelCase = heap[smallest_child], positions[smallest_child]
_lowerCAmelCase , _lowerCAmelCase = (
heap[start],
positions[start],
)
_lowerCAmelCase , _lowerCAmelCase = temp, tempa
_lowerCAmelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , __snake_case )
self.top_to_bottom(__snake_case , __snake_case , __snake_case , __snake_case )
def lowercase__ ( self : Dict , __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Tuple , __snake_case : Optional[int] ) -> Union[str, Any]:
_lowerCAmelCase = position[index]
while index != 0:
_lowerCAmelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
_lowerCAmelCase = heap[parent]
_lowerCAmelCase = position[parent]
self.set_position(position[parent] , __snake_case )
else:
_lowerCAmelCase = val
_lowerCAmelCase = temp
self.set_position(__snake_case , __snake_case )
break
_lowerCAmelCase = parent
else:
_lowerCAmelCase = val
_lowerCAmelCase = temp
self.set_position(__snake_case , 0 )
def lowercase__ ( self : Any , __snake_case : int , __snake_case : List[Any] ) -> Dict:
_lowerCAmelCase = len(__snake_case ) // 2 - 1
for i in range(__snake_case , -1 , -1 ):
self.top_to_bottom(__snake_case , __snake_case , len(__snake_case ) , __snake_case )
def lowercase__ ( self : List[Any] , __snake_case : Dict , __snake_case : Tuple ) -> Optional[Any]:
_lowerCAmelCase = positions[0]
_lowerCAmelCase = sys.maxsize
self.top_to_bottom(__snake_case , 0 , len(__snake_case ) , __snake_case )
return temp
def UpperCamelCase__ ( lowerCAmelCase ):
"""simple docstring"""
_lowerCAmelCase = Heap()
_lowerCAmelCase = [0] * len(lowerCAmelCase )
_lowerCAmelCase = [-1] * len(lowerCAmelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
_lowerCAmelCase = [] # Heap of Distance of vertices from their neighboring vertex
_lowerCAmelCase = []
for vertex in range(len(lowerCAmelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(lowerCAmelCase )
heap.node_position.append(lowerCAmelCase )
_lowerCAmelCase = []
_lowerCAmelCase = 1
_lowerCAmelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
_lowerCAmelCase = 0
_lowerCAmelCase = distance
heap.heapify(lowerCAmelCase , lowerCAmelCase )
for _ in range(1 , len(lowerCAmelCase ) ):
_lowerCAmelCase = heap.delete_minimum(lowerCAmelCase , lowerCAmelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
_lowerCAmelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(lowerCAmelCase )]
):
_lowerCAmelCase = distance
heap.bottom_to_top(
lowerCAmelCase , heap.get_position(lowerCAmelCase ) , lowerCAmelCase , lowerCAmelCase )
_lowerCAmelCase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
A__ : Tuple =int(input('''Enter number of edges: ''').strip())
A__ : Dict =defaultdict(list)
for _ in range(edges_number):
A__ : Any =[int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 70
|
'''simple docstring'''
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
A__ : Dict ='''2.13.1'''
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('''3.7'''):
raise ImportWarning(
'''To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'''
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'''To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'''
'''If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'''
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
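# Backward compatibility: in the original source, the assignments below re-attach these
# names to the deprecated module aliases imported above so that old import paths keep
# working before the temporary aliases are deleted.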
A__ : Tuple =concatenate_datasets
A__ : Dict =DownloadConfig
A__ : int =DownloadManager
A__ : Union[str, Any] =DownloadMode
A__ : Tuple =DownloadConfig
A__ : Optional[Any] =DownloadMode
A__ : str =DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 70
| 1
|
"""simple docstring"""
from timeit import timeit
def UpperCamelCase (lowercase_: int ) -> int:
if number < 0:
raise ValueError("""the value of input must not be negative""" )
A__ : List[str] = 0
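# n & (n - 1) clears the lowest set bit, so this loop runs once per set bit
# (Brian Kernighan's trick) rather than once per bit position.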
while number:
number &= number - 1
result += 1
return result
def UpperCamelCase (lowercase_: int ) -> int:
if number < 0:
raise ValueError("""the value of input must not be negative""" )
A__ : int = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def UpperCamelCase () -> None:
def do_benchmark(lowercase_: int ) -> None:
A__ : List[Any] = """import __main__ as z"""
print(f"""Benchmark when {number = }:""" )
print(f"""{get_set_bits_count_using_modulo_operator(lowercase_ ) = }""" )
A__ : Union[str, Any] = timeit(f"""z.get_set_bits_count_using_modulo_operator({number})""" , setup=lowercase_ )
print(f"""timeit() runs in {timing} seconds""" )
print(f"""{get_set_bits_count_using_brian_kernighans_algorithm(lowercase_ ) = }""" )
A__ : List[Any] = timeit(
f"""z.get_set_bits_count_using_brian_kernighans_algorithm({number})""" , setup=lowercase_ , )
print(f"""timeit() runs in {timing} seconds""" )
for number in (25, 37, 58, 0):
do_benchmark(lowercase_ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 368
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def UpperCamelCase (lowercase_: str , lowercase_: Optional[int] ) -> str:
A__ : Union[str, Any] = old_name
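# Translate a key from the original EfficientFormer checkpoint into the Hugging Face
# naming scheme, handling the patch embedding, the meta4D/meta3D blocks and the heads.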
if "patch_embed" in old_name:
A__ , A__ , A__ : Any = old_name.split(""".""" )
if layer == "0":
A__ : List[Any] = old_name.replace("""0""" , """convolution1""" )
elif layer == "1":
A__ : Optional[int] = old_name.replace("""1""" , """batchnorm_before""" )
elif layer == "3":
A__ : int = old_name.replace("""3""" , """convolution2""" )
else:
A__ : Dict = old_name.replace("""4""" , """batchnorm_after""" )
if "network" in old_name and re.search(r"""\d\.\d""" , lowercase_ ):
A__ : str = r"""\b\d{2}\b"""
if bool(re.search(lowercase_ , lowercase_ ) ):
A__ : Optional[Any] = re.search(r"""\d\.\d\d.""" , lowercase_ ).group()
else:
A__ : int = re.search(r"""\d\.\d.""" , lowercase_ ).group()
if int(match[0] ) < 6:
A__ : Optional[Any] = old_name.replace(lowercase_ , """""" )
A__ : Tuple = trimmed_name.replace("""network""" , match[0] + """.meta4D_layers.blocks.""" + match[2:-1] )
A__ : int = """intermediate_stages.""" + trimmed_name
else:
A__ : Dict = old_name.replace(lowercase_ , """""" )
if int(match[2] ) < num_meta4D_last_stage:
A__ : Optional[int] = trimmed_name.replace("""network""" , """meta4D_layers.blocks.""" + match[2] )
else:
A__ : Optional[Any] = str(int(match[2] ) - num_meta4D_last_stage )
A__ : Dict = trimmed_name.replace("""network""" , """meta3D_layers.blocks.""" + layer_index )
if "norm1" in old_name:
A__ : str = trimmed_name.replace("""norm1""" , """layernorm1""" )
elif "norm2" in old_name:
A__ : Optional[int] = trimmed_name.replace("""norm2""" , """layernorm2""" )
elif "fc1" in old_name:
A__ : List[Any] = trimmed_name.replace("""fc1""" , """linear_in""" )
elif "fc2" in old_name:
A__ : Optional[Any] = trimmed_name.replace("""fc2""" , """linear_out""" )
A__ : str = """last_stage.""" + trimmed_name
elif "network" in old_name and re.search(r""".\d.""" , lowercase_ ):
A__ : List[str] = old_name.replace("""network""" , """intermediate_stages""" )
if "fc" in new_name:
A__ : Optional[int] = new_name.replace("""fc""" , """convolution""" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
A__ : Optional[int] = new_name.replace("""norm1""" , """batchnorm_before""" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
A__ : int = new_name.replace("""norm2""" , """batchnorm_after""" )
if "proj" in new_name:
A__ : Tuple = new_name.replace("""proj""" , """projection""" )
if "dist_head" in new_name:
A__ : Optional[int] = new_name.replace("""dist_head""" , """distillation_classifier""" )
elif "head" in new_name:
A__ : Optional[Any] = new_name.replace("""head""" , """classifier""" )
elif "patch_embed" in new_name:
A__ : Optional[Any] = """efficientformer.""" + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
A__ : Union[str, Any] = new_name.replace("""norm""" , """layernorm""" )
A__ : Union[str, Any] = """efficientformer.""" + new_name
else:
A__ : int = """efficientformer.encoder.""" + new_name
return new_name
def UpperCamelCase (lowercase_: Optional[Any] , lowercase_: Union[str, Any] ) -> Tuple:
for key in checkpoint.copy().keys():
A__ : List[Any] = checkpoint.pop(lowercase_ )
A__ : Dict = val
return checkpoint
def UpperCamelCase () -> Optional[int]:
A__ : List[str] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A__ : List[str] = Image.open(requests.get(lowercase_ , stream=lowercase_ ).raw )
return image
def UpperCamelCase (lowercase_: Path , lowercase_: Path , lowercase_: Path , lowercase_: bool ) -> Tuple:
A__ : Any = torch.load(lowercase_ , map_location="""cpu""" )["""model"""]
A__ : List[Any] = EfficientFormerConfig.from_json_file(lowercase_ )
A__ : Any = EfficientFormerForImageClassificationWithTeacher(lowercase_ )
A__ : List[str] = """_""".join(checkpoint_path.split("""/""" )[-1].split(""".""" )[0].split("""_""" )[:-1] )
A__ : Union[str, Any] = config.depths[-1] - config.num_metaad_blocks + 1
A__ : Any = convert_torch_checkpoint(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
model.eval()
A__ : Tuple = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
# prepare image
A__ : Optional[int] = prepare_img()
A__ : Optional[Any] = 256
A__ : str = 224
A__ : List[str] = EfficientFormerImageProcessor(
size={"""shortest_edge""": image_size} , crop_size={"""height""": crop_size, """width""": crop_size} , resample=pillow_resamplings["""bicubic"""] , )
A__ : Tuple = processor(images=lowercase_ , return_tensors="""pt""" ).pixel_values
# original processing pipeline
A__ : List[Any] = Compose(
[
Resize(lowercase_ , interpolation=pillow_resamplings["""bicubic"""] ),
CenterCrop(lowercase_ ),
ToTensor(),
Normalize(lowercase_ , lowercase_ ),
] )
A__ : Any = image_transforms(lowercase_ ).unsqueeze(0 )
assert torch.allclose(lowercase_ , lowercase_ )
A__ : Optional[int] = model(lowercase_ )
A__ : List[str] = outputs.logits
A__ : Tuple = (1, 1000)
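# Sanity-check the converted weights: the first ten ImageNet logits must match
# reference values recorded from the original l1/l3/l7 checkpoints.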
if "l1" in model_name:
A__ : List[str] = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , lowercase_ , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
A__ : Any = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , lowercase_ , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
A__ : Union[str, Any] = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
f"""Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7""" )
# Save Checkpoints
Path(lowercase_ ).mkdir(exist_ok=lowercase_ )
model.save_pretrained(lowercase_ )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
processor.save_pretrained(lowercase_ )
print(f"""Processor successfuly saved at {pytorch_dump_path}""" )
if push_to_hub:
print("""Pushing model to the hub...""" )
model.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="""Add model""" , use_temp_dir=lowercase_ , )
processor.push_to_hub(
repo_id=f"""Bearnardd/{pytorch_dump_path}""" , commit_message="""Add image processor""" , use_temp_dir=lowercase_ , )
if __name__ == "__main__":
A_ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
A_ : List[Any] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 141
| 0
|
"""simple docstring"""
import math
class lowercase_ :
'''simple docstring'''
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : list[list[float]] , _UpperCAmelCase : list[int] ):
_A = 0.0
_A = 0.0
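# Squared Euclidean distances from the sample to the two weight vectors are
# accumulated below and compared to pick the winning unit.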
for i in range(len(_UpperCAmelCase ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def lowerCAmelCase_ ( self : str , _UpperCAmelCase : list[list[int | float]] , _UpperCAmelCase : list[int] , _UpperCAmelCase : int , _UpperCAmelCase : float ):
for i in range(len(_UpperCAmelCase ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def _snake_case ( ) -> None:
'''simple docstring'''
_A = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
_A = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
_A = SelfOrganizingMap()
_A = 3
_A = 0.5
for _ in range(_snake_case ):
for j in range(len(_snake_case ) ):
# training sample
_A = training_samples[j]
# Compute the winning vector
_A = self_organizing_map.get_winner(_snake_case , _snake_case )
# Update the winning vector
_A = self_organizing_map.update(_snake_case , _snake_case , _snake_case , _snake_case )
# classify test sample
_A = [0, 0, 0, 1]
_A = self_organizing_map.get_winner(_snake_case , _snake_case )
# results
print(F'''Clusters that the test sample belongs to : {winner}''' )
print(F'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main()
| 315
|
"""simple docstring"""
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , *_UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str=None , **_UpperCAmelCase : List[Any] ):
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
_A = eval_examples
_A = post_process_function
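# QA-style evaluation: the raw predictions from the eval loop are first turned into
# text answers by `post_process_function` and only then scored by `compute_metrics`.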
def lowerCAmelCase_ ( self : Tuple , _UpperCAmelCase : Optional[Any]=None , _UpperCAmelCase : str=None , _UpperCAmelCase : List[Any]=None , _UpperCAmelCase : str = "eval" ):
_A = self.eval_dataset if eval_dataset is None else eval_dataset
_A = self.get_eval_dataloader(_UpperCAmelCase )
_A = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_A = self.compute_metrics
_A = None
_A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_A = time.time()
try:
_A = eval_loop(
_UpperCAmelCase , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , )
finally:
_A = compute_metrics
_A = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
_A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions )
_A = self.compute_metrics(_UpperCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
_A = metrics.pop(_UpperCAmelCase )
metrics.update(output.metrics )
else:
_A = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(_UpperCAmelCase )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_A = self.callback_handler.on_evaluate(self.args , self.state , self.control , _UpperCAmelCase )
return metrics
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=None , _UpperCAmelCase : str = "test" ):
_A = self.get_test_dataloader(_UpperCAmelCase )
# Temporarily disable metric computation, we will do it in the loop here.
_A = self.compute_metrics
_A = None
_A = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
_A = time.time()
try:
_A = eval_loop(
_UpperCAmelCase , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_UpperCAmelCase , metric_key_prefix=_UpperCAmelCase , )
finally:
_A = compute_metrics
_A = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
_UpperCAmelCase , _UpperCAmelCase , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
_A = self.post_process_function(_UpperCAmelCase , _UpperCAmelCase , output.predictions , 'predict' )
_A = self.compute_metrics(_UpperCAmelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
_A = metrics.pop(_UpperCAmelCase )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_UpperCAmelCase )
| 315
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCAmelCase : Optional[int] = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
UpperCAmelCase : Tuple = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
UpperCAmelCase : Optional[Any] = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
UpperCAmelCase : Dict = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 148
|
UpperCAmelCase : Dict = [0, 2, 4, 6, 8]
UpperCAmelCase : Tuple = [1, 3, 5, 7, 9]
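# Counts "reversible" numbers (n + reverse(n) contains only odd digits, with leading
# zeros disallowed), as in Project Euler problem 145: digit pairs are chosen from the
# outside in while `remainder` carries the running digit sum inward.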
def _A ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : list[int] , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 10
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
a__ : str =0
for digit in range(10 ):
a__ : int =digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 10 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return result
a__ : List[str] =0
for digita in range(10 ):
a__ : Optional[int] =digita
if (remainder + digita) % 2 == 0:
a__ : Dict =ODD_DIGITS
else:
a__ : Any =EVEN_DIGITS
for digita in other_parity_digits:
a__ : Union[str, Any] =digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 10 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , )
return result
def _A ( SCREAMING_SNAKE_CASE : int = 9 ):
"""simple docstring"""
a__ : List[str] =0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(SCREAMING_SNAKE_CASE , 0 , [0] * length , SCREAMING_SNAKE_CASE )
return result
if __name__ == "__main__":
print(F"""{solution() = }""")
| 148
| 1
|
"""simple docstring"""
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
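# Broadcast a scalar (e.g. an image size) to an (x, x) pair; iterables such as
# (height, width) tuples pass through unchanged.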
if isinstance(SCREAMING_SNAKE_CASE , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class lowercase :
def _snake_case ( self , lowercase , lowercase ) -> List[str]:
pass
def _snake_case ( self ) -> Any:
pass
def _snake_case ( self ) -> Union[str, Any]:
pass
def _snake_case ( self , lowercase , lowercase , lowercase ) -> Union[str, Any]:
lowerCAmelCase = np.abs((a - b) ).max()
self.assertLessEqual(lowercase , lowercase , f'Difference between torch and flax is {diff} (>= {tol}).' )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase ) -> Optional[int]:
lowerCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase , lowercase )
lowerCAmelCase = FlaxVisionTextDualEncoderModel(lowercase )
lowerCAmelCase = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], config.projection_dim) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase ) -> Optional[Any]:
lowerCAmelCase , lowerCAmelCase = self.get_vision_text_model(lowercase , lowercase )
lowerCAmelCase = {"""vision_model""": vision_model, """text_model""": text_model}
lowerCAmelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase )
lowerCAmelCase = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase )
self.assertEqual(output["""text_embeds"""].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output["""image_embeds"""].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase ) -> Union[str, Any]:
lowerCAmelCase , lowerCAmelCase = self.get_vision_text_model(lowercase , lowercase )
lowerCAmelCase = {"""vision_model""": vision_model, """text_model""": text_model}
lowerCAmelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase )
lowerCAmelCase = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase )
lowerCAmelCase = output[0]
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase )
lowerCAmelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase )
lowerCAmelCase = model(input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase )
lowerCAmelCase = after_output[0]
lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowercase , 1e-3 )
def _snake_case ( self , lowercase , lowercase , lowercase , lowercase , lowercase=None , **lowercase ) -> Optional[int]:
lowerCAmelCase , lowerCAmelCase = self.get_vision_text_model(lowercase , lowercase )
lowerCAmelCase = {"""vision_model""": vision_model, """text_model""": text_model}
lowerCAmelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase )
lowerCAmelCase = model(
input_ids=lowercase , pixel_values=lowercase , attention_mask=lowercase , output_attentions=lowercase )
lowerCAmelCase = output.vision_model_output.attentions
self.assertEqual(len(lowercase ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase = to_atuple(vision_model.config.image_size )
lowerCAmelCase = to_atuple(vision_model.config.patch_size )
lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
lowerCAmelCase = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
lowerCAmelCase = output.text_model_output.attentions
self.assertEqual(len(lowercase ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _snake_case ( self , lowercase , lowercase , lowercase ) -> Dict:
pt_model.to(lowercase )
pt_model.eval()
# prepare inputs
lowerCAmelCase = inputs_dict
lowerCAmelCase = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
with torch.no_grad():
lowerCAmelCase = pt_model(**lowercase ).to_tuple()
lowerCAmelCase = fx_model(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowercase , pt_output.numpy() , 4e-2 )
# PT -> Flax
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowercase )
lowerCAmelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase , from_pt=lowercase )
lowerCAmelCase = fx_model_loaded(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ):
self.assert_almost_equals(lowercase , pt_output.numpy() , 4e-2 )
# Flax -> PT
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowercase )
lowerCAmelCase = VisionTextDualEncoderModel.from_pretrained(lowercase , from_flax=lowercase )
pt_model_loaded.to(lowercase )
pt_model_loaded.eval()
with torch.no_grad():
lowerCAmelCase = pt_model_loaded(**lowercase ).to_tuple()
self.assertEqual(len(lowercase ) , len(lowercase ) , """Output lengths differ between Flax and PyTorch""" )
for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ):
self.assert_almost_equals(lowercase , pt_output_loaded.numpy() , 4e-2 )
def _snake_case ( self , lowercase , lowercase , lowercase ) -> Dict:
lowerCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase , lowercase )
lowerCAmelCase = VisionTextDualEncoderModel(lowercase )
lowerCAmelCase = FlaxVisionTextDualEncoderModel(lowercase )
lowerCAmelCase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowercase )
lowerCAmelCase = fx_state
self.check_pt_flax_equivalence(lowercase , lowercase , lowercase )
def _snake_case ( self , lowercase , lowercase , lowercase ) -> Any:
lowerCAmelCase = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase , lowercase )
lowerCAmelCase = VisionTextDualEncoderModel(lowercase )
lowerCAmelCase = FlaxVisionTextDualEncoderModel(lowercase )
lowerCAmelCase = load_flax_weights_in_pytorch_model(lowercase , fx_model.params )
self.check_pt_flax_equivalence(lowercase , lowercase , lowercase )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**lowercase )
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**lowercase )
def _snake_case ( self ) -> Dict:
lowerCAmelCase = self.prepare_config_and_inputs()
self.check_save_load(**lowercase )
def _snake_case ( self ) -> Dict:
lowerCAmelCase = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**lowercase )
@is_pt_flax_cross_test
def _snake_case ( self ) -> str:
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase = config_inputs_dict.pop("""vision_config""" )
lowerCAmelCase = config_inputs_dict.pop("""text_config""" )
lowerCAmelCase = config_inputs_dict
self.check_equivalence_pt_to_flax(lowercase , lowercase , lowercase )
self.check_equivalence_flax_to_pt(lowercase , lowercase , lowercase )
@slow
def _snake_case ( self ) -> Optional[Any]:
lowerCAmelCase , lowerCAmelCase = self.get_pretrained_model_and_inputs()
lowerCAmelCase = model_a(**lowercase )
lowerCAmelCase = outputs[0]
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(lowercase )
lowerCAmelCase = FlaxVisionTextDualEncoderModel.from_pretrained(lowercase )
lowerCAmelCase = model_a(**lowercase )
lowerCAmelCase = after_outputs[0]
lowerCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowercase , 1e-5 )
@require_flax
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
def _snake_case ( self ) -> Optional[int]:
lowerCAmelCase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
"""hf-internal-testing/tiny-random-vit""" , """hf-internal-testing/tiny-bert""" , vision_from_pt=lowercase , text_from_pt=lowercase , )
lowerCAmelCase = 13
lowerCAmelCase = floats_tensor(
[
batch_size,
model.config.vision_config.num_channels,
model.config.vision_config.image_size,
model.config.vision_config.image_size,
] )
lowerCAmelCase = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size )
lowerCAmelCase = random_attention_mask([batch_size, 4] )
lowerCAmelCase = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask}
return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxViTModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        vit_model_tester = FlaxViTModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_torch
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
    def get_pretrained_model_and_inputs(self):
        model = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            "hf-internal-testing/tiny-random-clip", "hf-internal-testing/tiny-bert", vision_from_pt=True, text_from_pt=True,
        )
        batch_size = 13
        pixel_values = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ]
        )
        input_ids = ids_tensor([batch_size, 4], model.config.text_config.vocab_size)
        attention_mask = random_attention_mask([batch_size, 4])
        inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask}

        return model, inputs
    def get_vision_text_model(self, vision_config, text_config):
        vision_model = FlaxCLIPVisionModel(vision_config)
        text_model = FlaxBertModel(text_config)
        return vision_model, text_model

    def prepare_config_and_inputs(self):
        clip_model_tester = FlaxCLIPVisionModelTester(self)
        bert_model_tester = FlaxBertModelTester(self)
        vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs()
        text_config_and_inputs = bert_model_tester.prepare_config_and_inputs()

        vision_config, pixel_values = vision_config_and_inputs
        text_config, input_ids, token_type_ids, attention_mask = text_config_and_inputs
# make sure that cross attention layers are added
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": attention_mask,
"input_ids": input_ids,
"token_type_ids": token_type_ids,
}
@require_flax
@require_vision
class lowercase ( unittest.TestCase ):
@slow
    def test_inference(self):
        model = FlaxVisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = processor(
            text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="np"
        )

        outputs = model(**inputs)

        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape, (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]))
        self.assertEqual(
            outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]),
        )

        expected_logits = np.array([[1.2284727, 0.3104122]])

        self.assertTrue(np.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
| 46
|
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase_ = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class __A( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizer
SCREAMING_SNAKE_CASE__ = DebertaVaTokenizerFast
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = True
def UpperCAmelCase_ (self ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase__ = DebertaVaTokenizer(SCREAMING_SNAKE_CASE_ , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ ):
UpperCamelCase__ = """this is a test"""
UpperCamelCase__ = """this is a test"""
return input_text, output_text
def UpperCAmelCase_ (self ):
UpperCamelCase__ = """<pad>"""
UpperCamelCase__ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """[PAD]""" )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 3_00_01 )
def UpperCAmelCase_ (self ):
self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 )
def UpperCAmelCase_ (self ):
# fmt: off
UpperCamelCase__ = """ \tHeLLo!how \n Are yoU? """
UpperCamelCase__ = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
UpperCamelCase__ = DebertaVaTokenizer(SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def UpperCAmelCase_ (self ):
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def UpperCAmelCase_ (self ):
pass
def UpperCAmelCase_ (self ):
# fmt: off
UpperCamelCase__ = """I was born in 92000, and this is falsé."""
UpperCamelCase__ = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
UpperCamelCase__ = DebertaVaTokenizer(SCREAMING_SNAKE_CASE_ , split_by_punct=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE_ , split_by_punct=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
# fmt: off
UpperCamelCase__ = """I was born in 92000, and this is falsé."""
UpperCamelCase__ = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
UpperCamelCase__ = DebertaVaTokenizer(SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , split_by_punct=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , split_by_punct=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
# fmt: off
UpperCamelCase__ = """I was born in 92000, and this is falsé."""
UpperCamelCase__ = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
UpperCamelCase__ = DebertaVaTokenizer(SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , split_by_punct=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , split_by_punct=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
# fmt: off
UpperCamelCase__ = """I was born in 92000, and this is falsé."""
UpperCamelCase__ = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
UpperCamelCase__ = DebertaVaTokenizer(SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , split_by_punct=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , split_by_punct=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
# fmt: off
UpperCamelCase__ = """ \tHeLLo!how \n Are yoU? """
UpperCamelCase__ = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
UpperCamelCase__ = DebertaVaTokenizer(SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , split_by_punct=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , split_by_punct=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_rust_tokenizer()
UpperCamelCase__ = """I was born in 92000, and this is falsé."""
UpperCamelCase__ = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.get_rust_tokenizer()
UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = """This is a test"""
UpperCamelCase__ = [13, 1, 43_98, 25, 21, 12_89]
UpperCamelCase__ = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
UpperCamelCase__ = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
UpperCamelCase__ = DebertaVaTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = rust_tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# fmt: off
UpperCamelCase__ = """I was born in 92000, and this is falsé."""
UpperCamelCase__ = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9]
UpperCamelCase__ = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
UpperCamelCase__ = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
UpperCamelCase__ = tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = rust_tokenizer.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = rust_tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = DebertaVaTokenizer(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.encode("""sequence builders""" )
UpperCamelCase__ = tokenizer.encode("""multi-sequence build""" )
UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , SCREAMING_SNAKE_CASE_ )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , SCREAMING_SNAKE_CASE_ , )
@slow
def UpperCAmelCase_ (self ):
# fmt: off
UpperCamelCase__ = {"""input_ids""": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=SCREAMING_SNAKE_CASE_ , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
| 244
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""xlm-roberta-base""": 5_1_2,
"""xlm-roberta-large""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-dutch""": 5_1_2,
"""xlm-roberta-large-finetuned-conll02-spanish""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-english""": 5_1_2,
"""xlm-roberta-large-finetuned-conll03-german""": 5_1_2,
}
class _UpperCAmelCase(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>",
                 unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs=None, **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
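        # Illustrative mapping implied by the offset above (the ids are examples
        # assuming a typical spm vocab, not values from a specific model file):
        #   sp_model.PieceToId(",") == 3  ->  fairseq id 3 + 1 == 4
        #   "<mask>" is appended one position past the shifted spm range.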
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
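        # Layout produced by build_inputs_with_special_tokens above
        # (XLM-R / RoBERTa-style special tokens):
        #   single sequence:   <s> A </s>
        #   pair of sequences: <s> A </s> </s> B </s>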
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
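# Minimal usage sketch (assumes a SentencePiece model file on disk; the path and
# the emitted ids are illustrative, not taken from a real checkpoint):
#
#   tok = _UpperCAmelCase("sentencepiece.bpe.model")
#   enc = tok("Hello world")
#   enc["input_ids"]  # begins with <s> (id 0) and ends with </s> (id 2)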
| 46
|
'''simple docstring'''
import sys


def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for _ in range(n)] for _ in range(n)]
    sol = [[0 for _ in range(n)] for _ in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
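# Minimal sanity check, hand-verified: with dimensions [10, 20, 30] there is a
# single product A1(10x20) @ A2(20x30), costing 10 * 20 * 30 = 6000 scalar
# multiplications, so matrix[1][2] must be 6000.
#
#   m, _ = matrix_chain_order([10, 20, 30])
#   assert m[1][2] == 6000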
| 46
| 1
|
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Any:
return (pointa[0] - pointa[0]) ** 2 + (pointa[1] - pointa[1]) ** 2
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase=0 ) -> Optional[int]:
return sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase : x[column] )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=float('''inf''' ) ) -> List[Any]:
for i in range(points_counts - 1 ):
for j in range(i + 1 , __UpperCAmelCase ):
lowercase__: List[Any] = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
lowercase__: Union[str, Any] = current_dis
return min_dis
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=float('''inf''' ) ) -> Tuple:
for i in range(min(6 , points_counts - 1 ) , __UpperCAmelCase ):
for j in range(max(0 , i - 6 ) , __UpperCAmelCase ):
lowercase__: List[str] = euclidean_distance_sqr(points[i] , points[j] )
if current_dis < min_dis:
lowercase__: Tuple = current_dis
return min_dis
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
# base case
if points_counts <= 3:
return dis_between_closest_pair(__UpperCAmelCase , __UpperCAmelCase )
# recursion
lowercase__: List[Any] = points_counts // 2
lowercase__: int = closest_pair_of_points_sqr(
__UpperCAmelCase , points_sorted_on_y[:mid] , __UpperCAmelCase )
lowercase__: List[Any] = closest_pair_of_points_sqr(
__UpperCAmelCase , points_sorted_on_y[mid:] , points_counts - mid )
lowercase__: List[str] = min(__UpperCAmelCase , __UpperCAmelCase )
lowercase__: Optional[Any] = []
for point in points_sorted_on_x:
if abs(point[0] - points_sorted_on_x[mid][0] ) < closest_pair_dis:
cross_strip.append(__UpperCAmelCase )
lowercase__: Any = dis_between_closest_in_strip(
__UpperCAmelCase , len(__UpperCAmelCase ) , __UpperCAmelCase )
return min(__UpperCAmelCase , __UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
lowercase__: int = column_based_sort(__UpperCAmelCase , column=0 )
lowercase__: Optional[Any] = column_based_sort(__UpperCAmelCase , column=1 )
return (
closest_pair_of_points_sqr(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
) ** 0.5
if __name__ == "__main__":
__A = [(2, 3), (1_2, 3_0), (4_0, 5_0), (5, 1), (1_2, 1_0), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 177
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = logging.get_logger(__name__)
__A = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class UpperCAmelCase(PretrainedConfig):
    """simple docstring"""

    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, backbone_config=None, num_queries=900, max_position_embeddings=2048, encoder_layers=6,
                 encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1024,
                 decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu",
                 d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02,
                 init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine",
                 num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True,
                 two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1,
                 bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5,
                 giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self):
        return self.encoder_attention_heads

    @property
    def hidden_size(self):
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
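# Hedged usage sketch (assumes the public `transformers` DETA API, which this
# file mirrors; not executed here):
#
#   from transformers import DetaConfig, DetaModel
#   config = DetaConfig(two_stage=True, with_box_refine=True)
#   model = DetaModel(config)  # randomly initialized, ResNet backbone by default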
| 177
| 1
|
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 362
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = ShapEPipeline
__SCREAMING_SNAKE_CASE : Optional[int] = ['prompt']
__SCREAMING_SNAKE_CASE : str = ['prompt']
__SCREAMING_SNAKE_CASE : Dict = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
@property
def _a (self ):
return 32
@property
def _a (self ):
return 32
@property
def _a (self ):
return self.time_input_dim * 4
@property
def _a (self ):
return 8
@property
def _a (self ):
A_ : Dict = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def _a (self ):
torch.manual_seed(0 )
A_ : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowercase )
@property
def _a (self ):
torch.manual_seed(0 )
A_ : Any = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
A_ : List[str] = PriorTransformer(**lowercase )
return model
@property
def _a (self ):
torch.manual_seed(0 )
A_ : str = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
A_ : Dict = ShapERenderer(**lowercase )
return model
def _a (self ):
A_ : Optional[int] = self.dummy_prior
A_ : Optional[int] = self.dummy_text_encoder
A_ : int = self.dummy_tokenizer
A_ : Dict = self.dummy_renderer
A_ : Tuple = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=1024 , prediction_type="""sample""" , use_karras_sigmas=lowercase , clip_sample=lowercase , clip_sample_range=1.0 , )
A_ : Union[str, Any] = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def _a (self , lowercase , lowercase=0 ):
if str(lowercase ).startswith("""mps""" ):
A_ : Any = torch.manual_seed(lowercase )
else:
A_ : str = torch.Generator(device=lowercase ).manual_seed(lowercase )
A_ : List[str] = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def _a (self ):
A_ : str = """cpu"""
A_ : Union[str, Any] = self.get_dummy_components()
A_ : Optional[int] = self.pipeline_class(**lowercase )
A_ : str = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : Dict = pipe(**self.get_dummy_inputs(lowercase ) )
A_ : Dict = output.images[0]
A_ : int = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
A_ : Tuple = np.array(
[
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
0.00_03_92_16,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a (self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _a (self ):
A_ : Tuple = torch_device == """cpu"""
A_ : Union[str, Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowercase , relax_max_difference=lowercase , )
def _a (self ):
A_ : List[Any] = self.get_dummy_components()
A_ : Any = self.pipeline_class(**lowercase )
A_ : Any = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : int = 1
A_ : Union[str, Any] = 2
A_ : Dict = self.get_dummy_inputs(lowercase )
for key in inputs.keys():
if key in self.batch_params:
A_ : Optional[Any] = batch_size * [inputs[key]]
A_ : List[Any] = pipe(**lowercase , num_images_per_prompt=lowercase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a (self ):
A_ : Union[str, Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
A_ : Tuple = ShapEPipeline.from_pretrained("""openai/shap-e""" )
A_ : int = pipe.to(lowercase )
pipe.set_progress_bar_config(disable=lowercase )
A_ : int = torch.Generator(device=lowercase ).manual_seed(0 )
A_ : List[str] = pipe(
"""a shark""" , generator=lowercase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowercase , lowercase )
| 135
| 0
|
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, "w") as fp:
fp.write("\n".join(merges))
    tokenizer = FSMTTokenizer(
langs=["en", "ru"],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
config = FSMTConfig(
langs=["ru", "en"],
src_vocab_size=1_0_0_0,
tgt_vocab_size=1_0_0_0,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
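# Hedged round-trip check (assumes the save above succeeded; `mname_tiny` is the
# local directory written by save_pretrained):
#
#   reloaded = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
#   assert reloaded.num_parameters() == tiny_model.num_parameters()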
| 169
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase ( A , A , unittest.TestCase ):
lowerCAmelCase_ = IFImgaImgSuperResolutionPipeline
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"} )
lowerCAmelCase_ = PipelineTesterMixin.required_optional_params - {"latents"}
def snake_case ( self : int ):
"""simple docstring"""
return self._get_superresolution_dummy_components()
def snake_case ( self : Optional[Any] , __lowercase : Union[str, Any] , __lowercase : List[str]=0 ):
"""simple docstring"""
if str(__lowercase ).startswith('mps' ):
__lowercase =torch.manual_seed(__lowercase )
else:
__lowercase =torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__lowercase =floats_tensor((1, 3, 32, 32) , rng=random.Random(__lowercase ) ).to(__lowercase )
__lowercase =floats_tensor((1, 3, 16, 16) , rng=random.Random(__lowercase ) ).to(__lowercase )
__lowercase ={
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'original_image': original_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def snake_case ( self : Union[str, Any] ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def snake_case ( self : Optional[int] ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def snake_case ( self : int ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def snake_case ( self : Optional[int] ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def snake_case ( self : str ):
"""simple docstring"""
self._test_save_load_local()
def snake_case ( self : Optional[Any] ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 141
| 0
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
_A = logging.get_logger(__name__)
_A = {
'''SenseTime/deformable-detr''': '''https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json''',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class lowercase_(PretrainedConfig):
    model_type = "deformable_detr"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300,
                 max_position_embeddings=1024, encoder_layers=6, encoder_ffn_dim=1024, encoder_attention_heads=8,
                 decoder_layers=6, decoder_ffn_dim=1024, decoder_attention_heads=8, encoder_layerdrop=0.0,
                 is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0,
                 activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True,
                 auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50",
                 use_pretrained_backbone=True, dilation=False, num_feature_levels=4, encoder_n_points=4,
                 decoder_n_points=4, two_stage=False, two_stage_num_proposals=300, with_box_refine=False,
                 class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1,
                 bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25,
                 disable_custom_kernels=False, **kwargs):
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" )
if not use_timm_backbone:
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
UpperCamelCase_ = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCamelCase_ = backbone_config.get("""model_type""" )
UpperCamelCase_ = CONFIG_MAPPING[backbone_model_type]
UpperCamelCase_ = config_class.from_dict(__UpperCamelCase )
UpperCamelCase_ = use_timm_backbone
UpperCamelCase_ = backbone_config
UpperCamelCase_ = num_channels
UpperCamelCase_ = num_queries
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = d_model
UpperCamelCase_ = encoder_ffn_dim
UpperCamelCase_ = encoder_layers
UpperCamelCase_ = encoder_attention_heads
UpperCamelCase_ = decoder_ffn_dim
UpperCamelCase_ = decoder_layers
UpperCamelCase_ = decoder_attention_heads
UpperCamelCase_ = dropout
UpperCamelCase_ = attention_dropout
UpperCamelCase_ = activation_dropout
UpperCamelCase_ = activation_function
UpperCamelCase_ = init_std
UpperCamelCase_ = init_xavier_std
UpperCamelCase_ = encoder_layerdrop
UpperCamelCase_ = auxiliary_loss
UpperCamelCase_ = position_embedding_type
UpperCamelCase_ = backbone
UpperCamelCase_ = use_pretrained_backbone
UpperCamelCase_ = dilation
# deformable attributes
UpperCamelCase_ = num_feature_levels
UpperCamelCase_ = encoder_n_points
UpperCamelCase_ = decoder_n_points
UpperCamelCase_ = two_stage
UpperCamelCase_ = two_stage_num_proposals
UpperCamelCase_ = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
UpperCamelCase_ = class_cost
UpperCamelCase_ = bbox_cost
UpperCamelCase_ = giou_cost
# Loss coefficients
UpperCamelCase_ = mask_loss_coefficient
UpperCamelCase_ = dice_loss_coefficient
UpperCamelCase_ = bbox_loss_coefficient
UpperCamelCase_ = giou_loss_coefficient
UpperCamelCase_ = eos_coefficient
UpperCamelCase_ = focal_alpha
UpperCamelCase_ = disable_custom_kernels
super().__init__(is_encoder_decoder=__UpperCamelCase , **__UpperCamelCase )
    @property
    def num_attention_heads(self):
        return self.encoder_attention_heads

    @property
    def hidden_size(self):
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
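# The constraint enforced in __init__, illustrated (assumes the public
# DeformableDetrConfig API, which this file mirrors; not executed here):
#
#   DeformableDetrConfig(two_stage=True, with_box_refine=False)  # -> ValueError
#   DeformableDetrConfig(two_stage=True, with_box_refine=True)   # OK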
| 261
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_A = logging.get_logger(__name__)
class lowercase_(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True,
                 do_rescale=True, rescale_factor=1 / 255, crop_size=None, do_normalize=True, image_mean=None,
                 image_std=None, **kwargs):
"""simple docstring"""
super().__init__(**__UpperCamelCase )
UpperCamelCase_ = size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
UpperCamelCase_ = get_size_dict(__UpperCamelCase )
UpperCamelCase_ = crop_size if crop_size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
UpperCamelCase_ = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase , param_name="""crop_size""" )
UpperCamelCase_ = do_resize
UpperCamelCase_ = do_rescale
UpperCamelCase_ = do_normalize
UpperCamelCase_ = do_center_crop
UpperCamelCase_ = crop_size
UpperCamelCase_ = size
UpperCamelCase_ = resample
UpperCamelCase_ = rescale_factor
UpperCamelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCamelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size)

        if "shortest_edge" in size:
            size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            # size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
        elif "height" in size and "width" in size:
            size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must contain 'height' and 'width' keys or 'shortest_edge' key. Got {size.keys()}")
        return resize(image, size=size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)

        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None,
                   do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None,
                   return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        resample = resample if resample is not None else self.resample
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size)

        if not is_batched(images):
            images = [images]

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
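# Hedged usage sketch (names and shapes are illustrative; assumes the standard
# transformers image-processor call convention, where __call__ dispatches to
# preprocess):
#
#   import numpy as np
#   processor = lowercase_(size={"height": 224, "width": 224})
#   batch = processor(images=np.zeros((224, 224, 3), dtype=np.uint8), return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224)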
| 261
| 1
|
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]
__A = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
__A = sorted({word.strip().lower() for word in data.splitlines()})
__A = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
__A = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open("anagrams.txt", "w") as file:
file.write("all_anagrams = \n ")
file.write(pprint.pformat(all_anagrams))
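# Example of the signature grouping (assumes "dog" and "god" both appear in
# words.txt): signature("dog") == signature("god") == "dgo", so anagram("dog")
# would return a bucket containing both words.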
| 148
|
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
__A = "path-to-your-trained-model"
__A = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
__A = "A photo of sks dog in a bucket"
__A = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 148
| 1
|
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
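# The integration tests below download full checkpoints and are intended for nightly GPU runs only.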
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 137
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
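# Resource maps for the canonical BERT checkpoints hosted on the Hugging Face Hub.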
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
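# Maximum input length (in tokens) supported by each checkpoint's position embeddings.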
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "bert-base-uncased": 512,
    "bert-large-uncased": 512,
    "bert-base-cased": 512,
    "bert-large-cased": 512,
    "bert-base-multilingual-uncased": 512,
    "bert-base-multilingual-cased": 512,
    "bert-base-chinese": 512,
    "bert-base-german-cased": 512,
    "bert-large-uncased-whole-word-masking": 512,
    "bert-large-cased-whole-word-masking": 512,
    "bert-large-uncased-whole-word-masking-finetuned-squad": 512,
    "bert-large-cased-whole-word-masking-finetuned-squad": 512,
    "bert-base-cased-finetuned-mrpc": 512,
    "bert-base-german-dbmdz-cased": 512,
    "bert-base-german-dbmdz-uncased": 512,
    "TurkuNLP/bert-base-finnish-cased-v1": 512,
    "TurkuNLP/bert-base-finnish-uncased-v1": 512,
    "wietsedv/bert-base-dutch-cased": 512,
}
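# Per-checkpoint defaults applied when the tokenizer is loaded without explicit arguments.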
PRETRAINED_INIT_CONFIGURATION = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer if its saved state disagrees with the requested options.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 137
| 1
|
"""simple docstring"""
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
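# Published Conditional DETR checkpoints and their hosted configuration files.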
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/conditional-detr-resnet-50": (
"https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 46
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)
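# Published Longformer checkpoints and their hosted configuration files.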
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"allenai/longformer-base-4096": "https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json",
"allenai/longformer-large-4096": "https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json",
"allenai/longformer-large-4096-finetuned-triviaqa": (
"https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"
),
"allenai/longformer-base-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"
),
"allenai/longformer-large-4096-extra.pos.embd.only": (
"https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
| 46
| 1
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
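# Make the local test_module package importable so CustomImageProcessor can be loaded below.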
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")
        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )
        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)
        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )
        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"}
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 356
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
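# Route pytest's CLI options and terminal summary through transformers' shared test-report helpers.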
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 103
| 0
|