"""Speech-to-image pipeline: transcribe audio with Whisper, then condition Stable Diffusion on the transcription."""
import inspect
from typing import Callable, List, Optional, Union

import torch

from transformers import (
    CLIPImageProcessor,
    CLIPTextModel,
    CLIPTokenizer,
    WhisperForConditionalGeneration,
    WhisperProcessor,
)

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # setting the slice size to `None` restores full attention computation
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # transcribe the audio with Whisper and use the transcription as the prompt
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)

        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
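

# A hedged usage sketch for the pipeline above. The checkpoint ids
# ("openai/whisper-small", "runwayml/stable-diffusion-v1-5") and the dummy audio
# dataset are illustrative assumptions following common diffusers examples, not
# values pinned by this module.
if __name__ == "__main__":
    import datasets

    audio_dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
    audio_sample = audio_dataset[0]["audio"]

    speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
    speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
    sd = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

    pipeline = SpeechToImagePipeline(
        speech_model=speech_model,
        speech_processor=speech_processor,
        vae=sd.vae,
        text_encoder=sd.text_encoder,
        tokenizer=sd.tokenizer,
        unet=sd.unet,
        scheduler=sd.scheduler,
        safety_checker=sd.safety_checker,
        feature_extractor=sd.feature_extractor,
    )
    # slow on CPU; move the components to a GPU if one is available
    image = pipeline(audio_sample["array"], sampling_rate=audio_sample["sampling_rate"]).images[0]
    image.save("speech_to_image.png")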
"""Convert a HF Diffusers saved pipeline to a Stable Diffusion checkpoint (UNet, VAE and text encoder only)."""
import argparse
import os.path as osp
import re

import torch
from safetensors.torch import load_file, save_file


# =================#
# UNet Conversion #
# =================#

unet_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("time_embed.0.weight", "time_embedding.linear_1.weight"),
    ("time_embed.0.bias", "time_embedding.linear_1.bias"),
    ("time_embed.2.weight", "time_embedding.linear_2.weight"),
    ("time_embed.2.bias", "time_embedding.linear_2.bias"),
    ("input_blocks.0.0.weight", "conv_in.weight"),
    ("input_blocks.0.0.bias", "conv_in.bias"),
    ("out.0.weight", "conv_norm_out.weight"),
    ("out.0.bias", "conv_norm_out.bias"),
    ("out.2.weight", "conv_out.weight"),
    ("out.2.bias", "conv_out.bias"),
]

unet_conversion_map_resnet = [
    # (stable-diffusion, HF Diffusers)
    ("in_layers.0", "norm1"),
    ("in_layers.2", "conv1"),
    ("out_layers.0", "norm2"),
    ("out_layers.3", "conv2"),
    ("emb_layers.1", "time_emb_proj"),
    ("skip_connection", "conv_shortcut"),
]

unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))


def convert_unet_state_dict(unet_state_dict):
    # first map each HF key to itself, then rewrite the values to SD names
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict


# ================#
# VAE Conversion #
# ================#

vae_conversion_map = [
    # (stable-diffusion, HF Diffusers)
    ("nin_shortcut", "conv_shortcut"),
    ("norm_out", "conv_norm_out"),
    ("mid.attn_1.", "mid_block.attentions.0."),
]

for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))

vae_conversion_map_attn = [
    # (stable-diffusion, HF Diffusers)
    ("norm.", "group_norm."),
    ("q.", "query."),
    ("k.", "key."),
    ("v.", "value."),
    ("proj_out.", "proj_attn."),
]


def reshape_weight_for_sd(w):
    # convert HF linear weights to SD 1x1-conv weights by appending two singleton dims
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict


# =========================#
# Text Encoder Conversion #
# =========================#

textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}


def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict


def convert_text_enc_state_dict(text_enc_dict):
    # the v1 CLIP text encoder needs no renaming
    return text_enc_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    parser.add_argument(
        "--use_safetensors", action="store_true", help="Save weights using safetensors; the default is a .ckpt file."
    )

    args = parser.parse_args()

    assert args.model_path is not None, "Must provide a model path!"
    assert args.checkpoint_path is not None, "Must provide a checkpoint path!"

    # Path for safetensors
    unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
    vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
    text_enc_path = osp.join(args.model_path, "text_encoder", "model.safetensors")

    # Load models from safetensors if it exists, if it doesn't pytorch
    if osp.exists(unet_path):
        unet_state_dict = load_file(unet_path, device="cpu")
    else:
        unet_path = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
        unet_state_dict = torch.load(unet_path, map_location="cpu")

    if osp.exists(vae_path):
        vae_state_dict = load_file(vae_path, device="cpu")
    else:
        vae_path = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
        vae_state_dict = torch.load(vae_path, map_location="cpu")

    if osp.exists(text_enc_path):
        text_enc_dict = load_file(text_enc_path, device="cpu")
    else:
        text_enc_path = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
        text_enc_dict = torch.load(text_enc_path, map_location="cpu")

    # Convert the UNet model
    unet_state_dict = convert_unet_state_dict(unet_state_dict)
    unet_state_dict = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}

    # Convert the VAE model
    vae_state_dict = convert_vae_state_dict(vae_state_dict)
    vae_state_dict = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}

    # Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
    is_v20_model = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict

    if is_v20_model:
        # Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
        text_enc_dict = {"transformer." + k: v for k, v in text_enc_dict.items()}
        text_enc_dict = convert_text_enc_state_dict_v20(text_enc_dict)
        text_enc_dict = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
    else:
        text_enc_dict = convert_text_enc_state_dict(text_enc_dict)
        text_enc_dict = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}

    # Put together new checkpoint
    state_dict = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
    if args.half:
        state_dict = {k: v.half() for k, v in state_dict.items()}

    if args.use_safetensors:
        save_file(state_dict, args.checkpoint_path)
    else:
        state_dict = {"state_dict": state_dict}
        torch.save(state_dict, args.checkpoint_path)
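

# A tiny sanity sketch for the key mapping above (illustrative helper, not part
# of the original script): a single HF-style time-embedding weight should come
# out under the corresponding SD-format name.
def _demo_unet_key_mapping():
    demo = {"time_embedding.linear_1.weight": torch.zeros(1)}
    converted = convert_unet_state_dict(demo)
    assert list(converted.keys()) == ["time_embed.0.weight"]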
from __future__ import annotations


def check_polygon(nums: list[float]) -> bool:
    """Check whether the given side lengths can form a polygon: the longest
    side must be strictly shorter than the sum of all the other sides.

    >>> check_polygon([6, 10, 5])
    True
    >>> check_polygon([3, 7, 13, 2])
    False
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
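
# A hypothetical invocation of the script above (the script name and all paths
# are placeholders, not shipped artifacts):
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert/pytorch_model.bin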
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = """
IoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union
between the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,
the mean IoU of the image is calculated by taking the IoU of each class and averaging them.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`List[ndarray]`):
        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
    references (`List[ndarray]`):
        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.
    num_labels (`int`):
        Number of classes (categories).
    ignore_index (`int`):
        Index that will be ignored during evaluation.
    nan_to_num (`int`, *optional*):
        If specified, NaN values will be replaced by the number defined by the user.
    label_map (`dict`, *optional*):
        If specified, dictionary mapping old label indices to new label indices.
    reduce_labels (`bool`, *optional*, defaults to `False`):
        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,
        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.

Returns:
    `Dict[str, float | ndarray]` comprising various elements:
    - *mean_iou* (`float`):
        Mean Intersection-over-Union (IoU averaged over all categories).
    - *mean_accuracy* (`float`):
        Mean accuracy (averaged over all categories).
    - *overall_accuracy* (`float`):
        Overall accuracy on all images.
    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):
        Per category accuracy.
    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):
        Per category IoU.

Examples:

    >>> import numpy as np

    >>> mean_iou = datasets.load_metric("mean_iou")

    >>> # suppose one has 3 different segmentation maps predicted
    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])
    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])

    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])
    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])

    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])
    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])

    >>> predicted = [predicted_1, predicted_2, predicted_3]
    >>> ground_truth = [actual_1, actual_2, actual_3]

    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)
    >>> print(results)  # doctest: +NORMALIZE_WHITESPACE
    {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), 'per_category_accuracy': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}
"""

_CITATION = """\
@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,
author = {{MMSegmentation Contributors}},
license = {Apache-2.0},
month = {7},
title = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},
url = {https://github.com/open-mmlab/mmsegmentation},
year = {2020}
}"""

def intersect_and_union(
    pred_label,
    label,
    num_labels,
    ignore_index,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Calculate per-class intersection and union areas between one prediction and its ground truth."""
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] = new_id

    # turn into Numpy arrays
    pred_label = np.array(pred_label)
    label = np.array(label)

    if reduce_labels:
        label[label == 0] = 255
        label = label - 1
        label[label == 254] = 255

    mask = np.not_equal(label, ignore_index)
    pred_label = pred_label[mask]
    label = np.array(label)[mask]

    intersect = pred_label[pred_label == label]

    area_intersect = np.histogram(intersect, bins=num_labels, range=(0, num_labels - 1))[0]
    area_pred_label = np.histogram(pred_label, bins=num_labels, range=(0, num_labels - 1))[0]
    area_label = np.histogram(label, bins=num_labels, range=(0, num_labels - 1))[0]

    area_union = area_pred_label + area_label - area_intersect

    return area_intersect, area_union, area_pred_label, area_label


def total_intersect_and_union(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Accumulate intersection and union areas over a list of (prediction, ground truth) pairs."""
    total_area_intersect = np.zeros((num_labels,), dtype=np.float64)
    total_area_union = np.zeros((num_labels,), dtype=np.float64)
    total_area_pred_label = np.zeros((num_labels,), dtype=np.float64)
    total_area_label = np.zeros((num_labels,), dtype=np.float64)
    for result, gt_seg_map in zip(results, gt_seg_maps):
        area_intersect, area_union, area_pred_label, area_label = intersect_and_union(
            result, gt_seg_map, num_labels, ignore_index, label_map, reduce_labels
        )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label


def mean_iou(
    results,
    gt_seg_maps,
    num_labels,
    ignore_index,
    nan_to_num: Optional[int] = None,
    label_map: Optional[Dict[int, int]] = None,
    reduce_labels: bool = False,
):
    """Compute mean IoU, mean accuracy and overall accuracy from segmentation maps."""
    total_area_intersect, total_area_union, total_area_pred_label, total_area_label = total_intersect_and_union(
        results, gt_seg_maps, num_labels, ignore_index, label_map, reduce_labels
    )

    # compute metrics
    metrics = {}

    all_acc = total_area_intersect.sum() / total_area_label.sum()
    iou = total_area_intersect / total_area_union
    acc = total_area_intersect / total_area_label

    metrics["mean_iou"] = np.nanmean(iou)
    metrics["mean_accuracy"] = np.nanmean(acc)
    metrics["overall_accuracy"] = all_acc
    metrics["per_category_iou"] = iou
    metrics["per_category_accuracy"] = acc

    if nan_to_num is not None:
        metrics = {metric: np.nan_to_num(metric_value, nan=nan_to_num) for metric, metric_value in metrics.items()}

    return metrics


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MeanIoU(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    "predictions": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                    "references": datasets.Sequence(datasets.Sequence(datasets.Value("uint16"))),
                }
            ),
            reference_urls=[
                "https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        num_labels: int,
        ignore_index: bool,
        nan_to_num: Optional[int] = None,
        label_map: Optional[Dict[int, int]] = None,
        reduce_labels: bool = False,
    ):
        iou_result = mean_iou(
            results=predictions,
            gt_seg_maps=references,
            num_labels=num_labels,
            ignore_index=ignore_index,
            nan_to_num=nan_to_num,
            label_map=label_map,
            reduce_labels=reduce_labels,
        )
        return iou_result
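

# A small worked example for the helpers above (hand-checked values, not from
# the original metric card): with two classes, a 2x2 prediction that disagrees
# with the ground truth in exactly one pixel gives per-class IoU [0.5, 2/3].
if __name__ == "__main__":
    demo_pred = np.array([[0, 1], [1, 1]])
    demo_label = np.array([[0, 1], [0, 1]])
    area_intersect, area_union, _, _ = intersect_and_union(demo_pred, demo_label, num_labels=2, ignore_index=255)
    # area_intersect == [1, 2], area_union == [2, 3]
    print(area_intersect / area_union)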
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using a rolling hash so that
    each window of the text is compared against the pattern hash in O(1)."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
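
    # A hand-checked sketch of the rolling-hash update used above (names here
    # are illustrative, not from the original module). Dropping the leading
    # character c_out and appending c_in to a window of length m computes
    #     new_hash = ((old_hash - ord(c_out) * base**(m - 1)) * base + ord(c_in)) % modulus
    window, nxt = "abc", "d"
    h = 0
    for ch in window:
        h = (ord(ch) + h * alphabet_size) % modulus
    power = pow(alphabet_size, len(window) - 1, modulus)
    h = ((h - ord(window[0]) * power) * alphabet_size + ord(nxt)) % modulus
    direct = 0
    for ch in window[1:] + nxt:
        direct = (ord(ch) + direct * alphabet_size) % modulus
    assert h == direct  # hash("bcd") reached from hash("abc") in O(1)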
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
_a : Optional[Any] = logging.get_logger(__name__)
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
warnings.warn(
"""The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use LayoutLMv2ImageProcessor instead.""" , a__ , )
super().__init__(*a__ , **a__ )
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 100 ) -> int:
_lowerCAmelCase : Optional[Any] = n * (n + 1) * (2 * n + 1) / 6
_lowerCAmelCase : Tuple = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F"""{solution() = }""")
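    # Hand-checked example (not in the original file): for n = 10 the sum of
    # squares is 10*11*21/6 = 385 and the squared sum is (10*11/2)**2 = 3025,
    # so the difference is 2640.
    assert solution(10) == 2640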
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor


class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepare a list of PIL images, or numpy arrays if numpify=True, or PyTorch tensors if torchify=True."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input: RGBA inputs are converted down to 3 channels
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
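

# A hedged usage sketch of the processor exercised by these tests (constructed
# directly with illustrative settings rather than loaded from a checkpoint;
# requires PIL and torch to be installed):
if __name__ == "__main__":
    demo_processor = ChineseCLIPImageProcessor(
        do_resize=True,
        size={"height": 224, "width": 224},
        do_center_crop=True,
        crop_size={"height": 224, "width": 224},
    )
    demo_image = Image.fromarray(np.random.randint(0, 255, (256, 256, 3), dtype=np.uint8))
    pixel_values = demo_processor(demo_image, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # expected: torch.Size([1, 3, 224, 224])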
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    assert get_writer_batch_size(feature) == expected
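

# A minimal round-trip sketch with the reader/writer used above. In the real
# test suite `parquet_path` and `dataset` are pytest fixtures; here we build a
# throwaway dataset inline instead.
if __name__ == "__main__":
    import os
    import tempfile

    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    with tempfile.TemporaryDirectory() as tmp:
        demo_path = os.path.join(tmp, "demo.parquet")
        ParquetDatasetWriter(ds, demo_path).write()
        reloaded = ParquetDatasetReader(demo_path).read()
        assert reloaded.column_names == ["col_1", "col_2"]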
"""Integration tests for the VersatileDiffusion mega pipeline."""
import gc
import tempfile
import unittest

import numpy as np
import torch

from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device


torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish national ID (DNI/NIF): 8 digits followed by a check
    letter, where the letter must equal LOOKUP_LETTERS[number % 23]."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
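    # Hand-worked example (not from the original module): 12345678 % 23 == 14
    # and LOOKUP_LETTERS[14] == "Z", so "12345678Z" validates while
    # "12345678T" does not.
    assert is_spain_national_id("12345678Z")
    assert not is_spain_national_id("12345678T")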
"""simple docstring"""
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: str = (PNDMScheduler,)
_lowerCamelCase: Tuple = (('''num_inference_steps''', 50),)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,**A_ : List[Any] ) -> Union[str, Any]:
A = {
'num_train_timesteps': 1000,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**A_ )
return config
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int]=0 ,**A_ : Optional[int] ) -> Optional[Any]:
A = dict(self.forward_default_kwargs )
A = kwargs.pop('num_inference_steps' ,A_ )
A = self.dummy_sample
A = 0.1 * sample
A = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
A = self.get_scheduler_config(**A_ )
A = scheduler_class(**A_ )
scheduler.set_timesteps(A_ )
# copy over dummy past residuals
A = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A_ )
A = scheduler_class.from_pretrained(A_ )
new_scheduler.set_timesteps(A_ )
# copy over dummy past residuals
A = dummy_past_residuals[:]
A = scheduler.step_prk(A_ ,A_ ,A_ ,**A_ ).prev_sample
A = new_scheduler.step_prk(A_ ,A_ ,A_ ,**A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
A = scheduler.step_plms(A_ ,A_ ,A_ ,**A_ ).prev_sample
A = new_scheduler.step_plms(A_ ,A_ ,A_ ,**A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alphas with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # we specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # we specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
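# Minimal usage sketch (not part of the test suite): PNDM runs a Runge-Kutta
# (PRK) warm-up phase and then pseudo linear multistep (PLMS) steps. The
# residual below is a stand-in for a UNet noise prediction.
if __name__ == "__main__":
    scheduler = PNDMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.prk_timesteps:
        residual = 0.1 * sample
        sample = scheduler.step_prk(residual, t, sample).prev_sample
    for t in scheduler.plms_timesteps:
        residual = 0.1 * sample
        sample = scheduler.step_plms(residual, t, sample).prev_sample
    print(sample.shape)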
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    """Hash a bytes message with SHA-256."""

    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values (first 32 bits of the fractional parts of the
        # square roots of the first 8 primes)
        self.hashes = [
            0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
            0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
        ]

        # Initialize round constants (first 32 bits of the fractional parts of
        # the cube roots of the first 64 primes)
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5, 0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3, 0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC, 0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7, 0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13, 0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3, 0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5, 0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208, 0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]

        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()
    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        # Pad with a single 1 bit, then zeros, and append the bit length as a
        # 64-bit big-endian integer so the total length is a multiple of 64 bytes.
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None:
# Convert into blocks of 64 bytes
A = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
A = list(struct.unpack('>16L' ,A_ ) )
# add 48 0-ed integers
words += [0] * 48
A , A , A , A , A , A , A , A = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
A = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
A = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
A = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X1_0_0_0_0_0_0_0_0
# Compression
A = self.ror(A_ ,6 ) ^ self.ror(A_ ,11 ) ^ self.ror(A_ ,25 )
A = (e & f) ^ ((~e & 0XF_F_F_F_F_F_F_F) & g)
A = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X1_0_0_0_0_0_0_0_0
A = self.ror(A_ ,2 ) ^ self.ror(A_ ,13 ) ^ self.ror(A_ ,22 )
A = (a & b) ^ (a & c) ^ (b & c)
A = (sa + maj) % 0X1_0_0_0_0_0_0_0_0
A , A , A , A , A , A , A , A = (
g,
f,
e,
((d + tempa) % 0X1_0_0_0_0_0_0_0_0),
c,
b,
a,
((tempa + tempa) % 0X1_0_0_0_0_0_0_0_0),
)
A = [a, b, c, d, e, f, g, h]
# Modify final values
A = [
((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0)
for index, element in enumerate(self.hashes )
]
A = ''.join([hex(A_ )[2:].zfill(8 ) for value in self.hashes] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : int ) -> int:
return 0XF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)
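# Sanity check against the published NIST test vector for "abc":
assert SHA256(b"abc").hash == "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"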
class SHA256HashTest(unittest.TestCase):
    """Cross-check the implementation above against hashlib."""

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    """Hash either a file's contents or a string given on the command line."""
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(args.input_string, "utf-8")

    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
"""Tokenization class for MGP-STR (character-level, JSON vocab)."""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR, backed by a JSON vocab file."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
"""Import structure for the MVP model (lazy module)."""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
'tokenization_mvp': ['MvpTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
'MvpForCausalLM',
'MvpForConditionalGeneration',
'MvpForQuestionAnswering',
'MvpForSequenceClassification',
'MvpModel',
'MvpPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from __future__ import annotations

DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared


# There are 2 chains made:
# one ends with 89 with the chain member 58 being the one which, when declared
# first, gives the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element, 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89


def chain(number: int) -> bool:
    """Return True if the chain starting at ``number`` ends at 1, False if it ends at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    """Count how many starting numbers below ``number`` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
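# Quick check of the digit-square map against the chains from the Project
# Euler 92 problem statement: 44 -> 32 -> 13 -> 10 -> 1 and 85 -> 89.
assert next_number(44) == 32
assert next_number(32) == 13
assert next_number(85) == 89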
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"{solution() = }")
"""simple docstring"""
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS, r"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class FillMaskPipeline(Pipeline):
    """Masked language modeling prediction pipeline, using any model with a
    masked-LM head."""

    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index
    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> None:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)
    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs
    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k when targets are provided
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result
    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # the warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids
    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
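# Quick usage sketch via the standard pipeline factory (the model name is just
# an example and is downloaded on first run):
if __name__ == "__main__":
    from transformers import pipeline

    fill = pipeline("fill-mask", model="distilroberta-base")
    for prediction in fill("Paris is the <mask> of France.", top_k=2):
        print(prediction["token_str"], prediction["score"])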
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

DINAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json',
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class DinatConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration for the Dinat (Dilated Neighborhood Attention Transformer) model."""

    model_type = "dinat"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        patch_size=4,
        num_channels=3,
        embed_dim=64,
        depths=[3, 4, 6, 5],
        num_heads=[2, 4, 8, 16],
        kernel_size=7,
        dilations=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]],
        mlp_ratio=3.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        layer_scale_init_value=0.0,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.dilations = dilations
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
from __future__ import annotations

from collections import deque


def tarjan(g: list[list[int]]) -> list[list[int]]:
    """Return the strongly connected components of a directed graph
    (adjacency-list form) using Tarjan's algorithm."""
    n = len(g)
    stack: deque[int] = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]

    def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True

        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )

        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index

    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)

    return components


def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    """Build an adjacency list for ``n`` vertices from ``(u, v)`` edge pairs."""
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
# Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
"""GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GLPNForDepthEstimation""",
"""GLPNLayer""",
"""GLPNModel""",
"""GLPNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
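# The lazy-import pattern above in miniature (a generic sketch using PEP 562
# module-level __getattr__, not the actual _LazyModule implementation): the
# mapping routes an exported name to the module that defines it, and that
# module is only imported on first attribute access.
import importlib

_EXPORTS = {"sqrt": "math", "dataclass": "dataclasses"}


def __getattr__(name):
    if name in _EXPORTS:
        return getattr(importlib.import_module(_EXPORTS[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")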
"""simple docstring"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]

    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"

    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)

    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)

    return model
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
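# Example invocation (the script name and paths are illustrative):
#   python convert_mbart_checkpoint.py /path/to/fairseq/model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25 --finetuned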
"""simple docstring"""
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Check that a pip-style requirement (e.g. ``package_a==1.23``) is satisfied
    by the installed environment, raising otherwise."""
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if a version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
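# Usage sketch: these calls raise only when the environment fails the check
# (the pinned ranges below are just examples).
if __name__ == "__main__":
    require_version("python>=3.7")  # checks the interpreter itself
    require_version("packaging")  # non-versioned: only "is it installed"
    try:
        require_version("packaging>=9999")  # deliberately unsatisfiable
    except ImportError as err:
        print("caught as expected:", err)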
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})
    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config
    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpt2 import GPT2Tokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''gpt2''': 1_024,
'''gpt2-medium''': 1_024,
'''gpt2-large''': 1_024,
'''gpt2-xl''': 1_024,
'''distilgpt2''': 1_024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    """A "fast" GPT-2 tokenizer (backed by HuggingFace's tokenizers library),
    based on byte-level Byte-Pair-Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """This corresponds to DialoGPT variants of models."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
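# Quick usage sketch (downloads the gpt2 vocab on first run):
if __name__ == "__main__":
    tok = GPT2TokenizerFast.from_pretrained("gpt2")
    ids = tok("Hello world").input_ids
    print(ids, "->", tok.decode(ids))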
"""Project Euler 25: find the index of the first Fibonacci number with n digits."""


def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number (F(0) = 0, F(1) = 0 is treated as the base case here)."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])

        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with ``n`` digits."""
    digits = 0
    index = 2

    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))

    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
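# Quick checks: F(10) = 55 under this indexing, and the first 3-digit
# Fibonacci number is F(12) = 144.
assert fibonacci(10) == 55
assert fibonacci_digits_index(3) == 12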
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    """CamemBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
import copy
import re
class TrialShortNamer:
    """Derive short, collision-free run names from hyperparameter dicts."""

    PREFIX = "hp"
    DEFAULTS = {}
    NAMING_INFO = None

    @classmethod
    def set_defaults(cls, prefix, defaults):
        cls.PREFIX = prefix
        cls.DEFAULTS = defaults
        cls.build_naming_info()
    @staticmethod
    def shortname_for_word(info, word):
        if len(word) == 0:
            return ""
        short_word = None
        if any(char.isdigit() for char in word):
            raise Exception(f"Parameters should not contain numbers: '{word}' contains a number")
        if word in info["short_word"]:
            return info["short_word"][word]
        for prefix_len in range(1, len(word) + 1):
            prefix = word[:prefix_len]
            if prefix in info["reverse_short_word"]:
                continue
            else:
                short_word = prefix
                break

        if short_word is None:
            # Paranoid fallback
            def int_to_alphabetic(integer):
                s = ""
                while integer != 0:
                    s = chr(ord("A") + integer % 10) + s
                    integer //= 10
                return s

            i = 0
            while True:
                sword = word + "#" + int_to_alphabetic(i)
                if sword in info["reverse_short_word"]:
                    continue
                else:
                    short_word = sword
                    break

        info["short_word"][word] = short_word
        info["reverse_short_word"][short_word] = word
        return short_word
@staticmethod
def snake_case__ ( __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = param_name.split("_")
_lowerCAmelCase : List[str] = [TrialShortNamer.shortname_for_word(lowerCAmelCase__, lowerCAmelCase__) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
_lowerCAmelCase : Dict = ["", "_"]
for separator in separators:
_lowerCAmelCase : str = separator.join(lowerCAmelCase__)
if shortname not in info["reverse_short_param"]:
_lowerCAmelCase : Optional[Any] = shortname
_lowerCAmelCase : List[str] = param_name
return shortname
return param_name
@staticmethod
def snake_case__ ( __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = TrialShortNamer.shortname_for_key(lowerCAmelCase__, lowerCAmelCase__)
_lowerCAmelCase : int = short_name
_lowerCAmelCase : List[Any] = param_name
@classmethod
def snake_case__ ( cls):
'''simple docstring'''
if cls.NAMING_INFO is not None:
return
_lowerCAmelCase : Tuple = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
_lowerCAmelCase : Optional[int] = list(cls.DEFAULTS.keys())
for k in field_keys:
cls.add_new_param_name(lowerCAmelCase__, lowerCAmelCase__)
_lowerCAmelCase : List[str] = info
@classmethod
def snake_case__ ( cls, __a):
'''simple docstring'''
cls.build_naming_info()
assert cls.PREFIX is not None
_lowerCAmelCase : str = [copy.copy(cls.PREFIX)]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f"You should provide a default value for the param name {k} with value {v}")
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
_lowerCAmelCase : List[Any] = cls.NAMING_INFO["short_param"][k]
if isinstance(lowerCAmelCase__, lowerCAmelCase__):
_lowerCAmelCase : List[str] = 1 if v else 0
_lowerCAmelCase : int = "" if isinstance(lowerCAmelCase__, (int, float)) else "-"
_lowerCAmelCase : Dict = f"{key}{sep}{v}"
name.append(lowerCAmelCase__)
return "_".join(lowerCAmelCase__)
@classmethod
def snake_case__ ( cls, __a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = repr[len(cls.PREFIX) + 1 :]
if repr == "":
_lowerCAmelCase : Optional[int] = []
else:
_lowerCAmelCase : List[str] = repr.split("_")
_lowerCAmelCase : str = {}
for value in values:
if "-" in value:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = value.split("-")
else:
_lowerCAmelCase : Tuple = re.sub("[0-9.]", "", lowerCAmelCase__)
_lowerCAmelCase : Union[str, Any] = float(re.sub("[^0-9.]", "", lowerCAmelCase__))
_lowerCAmelCase : Any = cls.NAMING_INFO["reverse_short_param"][p_k]
_lowerCAmelCase : Optional[Any] = p_v
for k in cls.DEFAULTS:
if k not in parameters:
_lowerCAmelCase : List[Any] = cls.DEFAULTS[k]
return parameters
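# Hedged usage sketch (illustrative, not from the original file): with defaults
# registered, `shortname` encodes only the parameters that differ from their
# defaults, and `parse_repr` inverts the encoding:
#
#     TrialShortNamer.set_defaults("hp", {"learning_rate": 0.1, "num_epochs": 3})
#     name = TrialShortNamer.shortname({"learning_rate": 0.5, "num_epochs": 3})
#     # e.g. "hp_lr0.5" (exact short keys depend on collision-free prefixes)
#     params = TrialShortNamer.parse_repr(name)  # {"learning_rate": 0.5, "num_epochs": 3}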
| 366
|
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0):
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self):
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self):
        return str(self)

    def validate_indicies(self, loc):
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value):
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self):
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        # Size validation
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # self should be square, matching u, v
        assert u.column == v.column == 1  # u, v should be column vectors
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
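# Background note (added for clarity): sherman_morrison assumes `self` already
# holds A^(-1) and applies the Sherman-Morrison identity
#     (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u);
# the scalar 1 + v^T A^(-1) u is `numerator_factor`, and a zero value means
# A + u v^T is singular, hence the None return.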
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
| 300
| 0
|
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
if is_torch_available():
import torch
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor of the given shape, as nested lists."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
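    # Note (added for clarity): the asserted shape (1, 1024, 128) is
    # (batch, num_mel_frames, num_mel_bins) -- the AST extractor pads or
    # truncates each clip's log-mel filter bank features to 1024 frames
    # of 128 mel bins.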
| 52
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29_056,
        hidden_size=1_024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4_096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
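# Hedged usage sketch (not part of the original file): like any PretrainedConfig
# subclass, the config accepts keyword overrides at construction time, e.g.
#     config = MegatronBertConfig(num_hidden_layers=12, hidden_size=768)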
| 247
| 0
|
'''simple docstring'''
from __future__ import annotations
def make_matrix(row_size: int = 4) -> list[list[int]]:
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix: list[list[int]]) -> list[list[int]]:
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [list(x) for x in zip(*matrix)]
    return matrix


def reverse_row(matrix: list[list[int]]) -> list[list[int]]:
    matrix = matrix[::-1]
    return matrix


def reverse_column(matrix: list[list[int]]) -> list[list[int]]:
    matrix = [x[::-1] for x in matrix]
    return matrix


def print_matrix(matrix: list[list[int]]) -> None:
    for i in matrix:
        print(*i)


if __name__ == "__main__":
    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 90 counterclockwise:\n")
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 180:\n")
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print("\norigin:\n")
    print_matrix(matrix)
    print("\nrotate 270 counterclockwise:\n")
    print_matrix(rotate_270(matrix))
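# Worked example (added for clarity): make_matrix() defaults to the 4x4 matrix
#     [[ 1,  2,  3,  4], [ 5,  6,  7,  8], [ 9, 10, 11, 12], [13, 14, 15, 16]]
# and rotate_90 (transpose, then reverse the row order) yields
#     [[ 4,  8, 12, 16], [ 3,  7, 11, 15], [ 2,  6, 10, 14], [ 1,  5,  9, 13]],
# i.e. a 90-degree counterclockwise rotation.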
| 364
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'''
),
'''google/electra-base-generator''': '''https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt''',
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''google/electra-small-generator''': (
'''https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'''
),
'''google/electra-base-generator''': (
'''https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'''
),
'''google/electra-large-generator''': (
'''https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'''
),
'''google/electra-small-discriminator''': (
'''https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-base-discriminator''': (
'''https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'''
),
'''google/electra-large-discriminator''': (
'''https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/electra-small-generator": 512,
    "google/electra-base-generator": 512,
    "google/electra-large-generator": 512,
    "google/electra-small-discriminator": 512,
    "google/electra-base-discriminator": 512,
    "google/electra-large-discriminator": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
'''google/electra-small-generator''': {'''do_lower_case''': True},
'''google/electra-base-generator''': {'''do_lower_case''': True},
'''google/electra-large-generator''': {'''do_lower_case''': True},
'''google/electra-small-discriminator''': {'''do_lower_case''': True},
'''google/electra-base-discriminator''': {'''do_lower_case''': True},
'''google/electra-large-discriminator''': {'''do_lower_case''': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
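    # Note (added for clarity): for a pair of sequences the two methods above
    # produce the standard BERT-style layout
    #     tokens:         [CLS] A ... [SEP] B ... [SEP]
    #     token_type_ids:   0   0 ...   0   1 ...   1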
| 217
| 0
|
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple:
if inductance <= 0:
raise ValueError("""Inductance cannot be 0 or negative""" )
elif capacitance <= 0:
raise ValueError("""Capacitance cannot be 0 or negative""" )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
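# Note (added for clarity): this evaluates the standard LC resonance formula
#     f0 = 1 / (2 * pi * sqrt(L * C))
# e.g. inductance = 10e-3 H and capacitance = 100e-6 F give
# 1 / (2 * pi * sqrt(1e-6)) ~= 159.15 Hz.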
if __name__ == "__main__":
import doctest
doctest.testmod()
| 154
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 154
| 1
|
"""simple docstring"""
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n'

_DESCRIPTION = '\\nBLEURT is a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n'

_KWARGS_DESCRIPTION = '\nBLEURT score.\n\nArgs:\n    `predictions` (list of str): prediction/candidate sentences\n    `references` (list of str): reference sentences\n    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n    \'scores\': List of scores.\nExamples:\n\n    >>> predictions = ["hello there", "general kenobi"]\n    >>> references = ["hello there", "general kenobi"]\n    >>> bleurt = datasets.load_metric("bleurt")\n    >>> results = bleurt.compute(predictions=predictions, references=references)\n    >>> print([round(v, 2) for v in results["scores"]])\n    [1.03, 1.04]\n'

CHECKPOINT_URLS = {
'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip',
'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip',
'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip',
'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip',
'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip',
'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip',
'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip',
'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip',
'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip',
'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
    def _download_and_prepare(self, dl_manager):
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
| 368
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 40
| 0
|
'''simple docstring'''
def dodecahedron_surface_area(edge: float) -> float:
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
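# Note (added for clarity): both functions use the closed forms for a regular
# dodecahedron with edge length a:
#     surface area = 3 * sqrt(25 + 10 * sqrt(5)) * a^2
#     volume       = (15 + 7 * sqrt(5)) / 4 * a^3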
if __name__ == "__main__":
import doctest
doctest.testmod()
| 229
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ["text", "image", "audio"]


def create_inputs(input_types: List[str]):
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input")
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png").resize((512, 512))
            )
        elif input_type == "audio":
            inputs.append(torch.ones(3000))
        elif isinstance(input_type, list):
            inputs.append(create_inputs(input_type))
        else:
            raise ValueError(f"Invalid type requested: {input_type}")
    return inputs


def output_types(outputs: List):
    output_types = []
    for output in outputs:
        if isinstance(output, (str, AgentText)):
            output_types.append("text")
        elif isinstance(output, (Image.Image, AgentImage)):
            output_types.append("image")
        elif isinstance(output, (torch.Tensor, AgentAudio)):
            output_types.append("audio")
        else:
            raise ValueError(f"Invalid output: {output}")
    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs(self):
        self.assertTrue(hasattr(self.tool, "inputs"))
        self.assertTrue(hasattr(self.tool, "outputs"))

        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input, list):
                for __input in _input:
                    self.assertTrue(__input in authorized_types)
            else:
                self.assertTrue(_input in authorized_types)

        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types)

    def test_call(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)

        # There is a single output
        if len(self.tool.outputs) == 1:
            outputs = [outputs]

        self.assertListEqual(output_types(outputs), self.tool.outputs)

    def test_common_attributes(self):
        self.assertTrue(hasattr(self.tool, "description"))
        self.assertTrue(hasattr(self.tool, "default_checkpoint"))
        self.assertTrue(self.tool.description.startswith("This is a tool that"))

    def test_agent_types_outputs(self):
        inputs = create_inputs(self.tool.inputs)
        outputs = self.tool(*inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
        for output, output_type in zip(outputs, self.tool.outputs):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output, agent_type))

    def test_agent_type_checking(self):
        inputs = create_inputs(self.tool.inputs)
        _inputs = []
        for _input, input_type in zip(inputs, self.tool.inputs):
            if isinstance(input_type, list):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input) for _input_type in input_type])
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input))
        # Should not raise an error
        outputs = self.tool(*_inputs)
        if not isinstance(outputs, list):
            outputs = [outputs]
        self.assertEqual(len(outputs), len(self.tool.outputs))
| 216
| 0
|
'''simple docstring'''
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
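# Note (added for clarity): for a cuboid with sides a <= b <= c, the shortest
# surface path between opposite corners has length sqrt((a + b)**2 + c**2), so
# the loop groups cuboids by s = a + b and counts, for each (s, c) whose path
# length is an integer, how many valid (a, b) splits of s exist.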
if __name__ == "__main__":
print(f"{solution() = }")
| 48
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
"""SEW_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SEWForCTC""",
"""SEWForSequenceClassification""",
"""SEWModel""",
"""SEWPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 48
| 1
|
'''simple docstring'''
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self):
        self._trie: dict = {}

    def insert_word(self, text: str):
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str):
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict):
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))
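# Expected behaviour (added for clarity): with the word list above,
# autocomplete_using_trie("de") returns the completions sharing the "de"
# prefix, each terminated by the sentinel space, e.g.
#     ('depart ', 'detergent ', 'deer ', 'deal ')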
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 31
|
def heaps(arr: list) -> list:
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[i], arr[0] = arr[0], arr[i]
                else:
                    arr[i], arr[c[i]] = arr[c[i]], arr[i]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
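# Note (added for clarity): this is Heap's algorithm, which produces all n!
# permutations, each obtained from the previous one by a single swap; e.g.
# heaps([1, 2, 3]) yields 6 tuples, starting with (1, 2, 3) and (2, 1, 3).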
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
print(heaps(arr))
| 300
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""tiiuae/falcon-40b""": """https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json""",
"""tiiuae/falcon-7b""": """https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json""",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
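# Note (added for clarity): with the defaults above, head_dim is
# 4544 // 71 = 64, and `rotary` is True because `alibi` defaults to False
# (rotary embeddings and ALiBi are mutually exclusive here).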
| 366
|
"""simple docstring"""
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)
    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
| 85
| 0
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class TrajectoryTransformerConfig(PretrainedConfig):
    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100,
        action_weight=5,
        reward_weight=1,
        value_weight=1,
        block_size=249,
        action_dim=6,
        observation_dim=17,
        transition_dim=25,
        n_layer=4,
        n_head=4,
        n_embd=128,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        learning_rate=0.0006,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        kaiming_initializer_range=1,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
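# Note (added for clarity): with the defaults above, transition_dim (25) equals
# observation_dim (17) + action_dim (6) + 2, consistent with one observation,
# one action, plus scalar reward and value slots per transition token.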
| 97
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/beit-base-patch16-224-pt22k": (
"https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class snake_case ( __snake_case ):
SCREAMING_SNAKE_CASE_ : Tuple = """beit"""
    def __init__( self , vocab_size=8_1_9_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=2_2_4 , patch_size=1_6 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 1_1] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=2_5_5 , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class snake_case ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("""1.11""" )
@property
    def inputs( self )-> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
    def atol_for_validation( self )-> float:
'''simple docstring'''
return 1e-4
| 217
| 0
|
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
a :Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class __a (Pipeline):
    '''simple docstring'''
def __init__( self , **_a ) -> str:
"""simple docstring"""
super().__init__(**_a )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
    def __call__( self , audios: Union[np.ndarray, bytes, str] , **kwargs ):
        """simple docstring"""
        return super().__call__(audios , **kwargs )

    def _sanitize_parameters( self , **kwargs ):
        """simple docstring"""
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["""candidate_labels"""]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["""hypothesis_template"""]
        return preprocess_params, {}, {}

    def preprocess( self , audio , candidate_labels=None , hypothesis_template="This is a sound of {}." ):
        """simple docstring"""
        if isinstance(audio , str ):
            if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio ).content
            else:
                with open(audio , """rb""" ) as f:
                    audio = f.read()

        if isinstance(audio , bytes ):
            audio = ffmpeg_read(audio , self.feature_extractor.sampling_rate )

        if not isinstance(audio , np.ndarray ):
            raise ValueError("""We expect a numpy ndarray as input""" )
        if len(audio.shape ) != 1:
            raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )

        inputs = self.feature_extractor(
            [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors="""pt""" )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward( self , model_inputs ):
        """simple docstring"""
        candidate_labels = model_inputs.pop("""candidate_labels""" )
        text_inputs = model_inputs.pop("""text_inputs""" )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs , **model_inputs )

        model_outputs = {
            """candidate_labels""": candidate_labels,
            """logits""": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess( self , model_outputs ):
        """simple docstring"""
        candidate_labels = model_outputs.pop("""candidate_labels""" )
        logits = model_outputs["""logits"""][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0 )
            scores = probs.tolist()
        else:
            raise ValueError("""`tf` framework not supported.""" )

        result = [
            {"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
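# Illustrative usage sketch (not part of the upstream file; the checkpoint name
# below is the documented default for this task, but treat it as an assumption):
#
# from transformers import pipeline
#
# classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
# classifier("path/to/audio.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
# # -> a list of {"score": ..., "label": ...} dicts sorted by descending score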
| 56
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a :List[str] = logging.get_logger(__name__)
a :Union[str, Any] = {
"MIT/ast-finetuned-audioset-10-10-0.4593": (
"https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
),
}
class __a (PretrainedConfig):
    '''simple docstring'''
    model_type = """audio-spectrogram-transformer"""

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-1_2 , patch_size=16 , qkv_bias=True , frequency_stride=10 , time_stride=10 , max_length=1_024 , num_mel_bins=128 , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 56
| 1
|
"""simple docstring"""
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    '''simple docstring'''
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    '''simple docstring'''
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
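# Illustrative note (not part of the original script): the sample network above is
# the classic CLRS flow network, whose maximum flow from node 0 to node 5 is 23.
# `mincut` returns the edges left fully saturated once no augmenting path remains,
# e.g. (1, 3), (4, 3) and (4, 5), whose original capacities 12 + 7 + 4 sum to the
# max-flow value of 23.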
| 17
|
"""simple docstring"""
from __future__ import annotations
class XORCipher:
    """simple docstring"""

    def __init__( self , key : int = 0):
        self.__key = key

    def encrypt( self , content : str , key : int):
        assert isinstance(key , int) and isinstance(content , str)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def decrypt( self , content : list[str] , key : int):
        assert isinstance(key , int) and isinstance(content , list)
        key = key or self.__key or 1
        # make sure key is an appropriate size
        key %= 255
        return [chr(ord(ch) ^ key) for ch in content]

    def encrypt_string( self , content : str , key : int = 0):
        assert isinstance(key , int) and isinstance(content , str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def decrypt_string( self , content : str , key : int = 0):
        assert isinstance(key , int) and isinstance(content , str)
        key = key or self.__key or 1
        # make sure key can be any size
        while key > 255:
            key -= 255
        # This will be returned
        ans = ""
        for ch in content:
            ans += chr(ord(ch) ^ key)
        return ans

    def encrypt_file( self , file : str , key : int = 0):
        assert isinstance(file , str) and isinstance(key , int)
        try:
            with open(file) as fin, open("encrypt.out" , "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line , key))
        except OSError:
            return False
        return True

    def decrypt_file( self , file : str , key : int):
        assert isinstance(file , str) and isinstance(key , int)
        try:
            with open(file) as fin, open("decrypt.out" , "w+") as fout:
                # actual decrypt-process
                for line in fin:
                    fout.write(self.decrypt_string(line , key))
        except OSError:
            return False
        return True
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 40
| 0
|
"""simple docstring"""
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(F'''\u001b[{color}m{content}\u001b[0m''', end)


def reset_cursor():
    forceWrite('''\r''')


def move_cursor(num_lines: int, direction: str):
    forceWrite(F'''\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}''')


def clear_line():
    forceWrite(''' ''' * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite('''-''' * TERMINAL_WIDTH)
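# Illustrative note (not part of the upstream helpers): these functions rely on
# standard ANSI escape sequences, e.g. "\033[2A" moves the cursor up two lines
# ("A" = up, "B" = down, per CURSOR_TO_CHAR above) and "\u001b[32mhi\u001b[0m"
# prints "hi" in green before resetting the style.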
| 86
|
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
    },
    'merges_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'allegro/herbert-base-cased': 5_14}
PRETRAINED_INIT_CONFIGURATION = {}
class _UpperCAmelCase ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sep_token="</s>" , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , sep_token=sep_token , **kwargs , )

    def build_inputs_with_special_tokens( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 86
| 1
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('input_proj.weight', 'input_projection.weight'),
('input_proj.bias', 'input_projection.bias'),
('query_embed.weight', 'query_position_embeddings.weight'),
('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
('class_embed.weight', 'class_labels_classifier.weight'),
('class_embed.bias', 'class_labels_classifier.bias'),
('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
]
)
def rename_key(state_dict ,old ,new ):
    val = state_dict.pop(old )
    state_dict[new] = val
def rename_backbone_keys(state_dict ):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body" ,"backbone.conv_encoder.model" )
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict ,is_panoptic=False ):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
        in_proj_bias = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.weight'''] = in_proj_weight[:256, :]
        state_dict[f'''encoder.layers.{i}.self_attn.q_proj.bias'''] = in_proj_bias[:256]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.weight'''] = in_proj_weight[256:512, :]
        state_dict[f'''encoder.layers.{i}.self_attn.k_proj.bias'''] = in_proj_bias[256:512]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.weight'''] = in_proj_weight[-256:, :]
        state_dict[f'''encoder.layers.{i}.self_attn.v_proj.bias'''] = in_proj_bias[-256:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url ,stream=True ).raw )
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name ,pytorch_dump_folder_path ):
lowerCamelCase : Optional[Any] = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
lowerCamelCase : str = "resnet101"
if "dc5" in model_name:
lowerCamelCase : int = True
lowerCamelCase : int = "panoptic" in model_name
if is_panoptic:
lowerCamelCase : List[str] = 250
else:
lowerCamelCase : int = 91
lowerCamelCase : Tuple = "huggingface/label-files"
lowerCamelCase : Optional[Any] = "coco-detection-id2label.json"
lowerCamelCase : Dict = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) )
    lowerCamelCase : Optional[int] = {int(k ): v for k, v in idalabel.items()}
lowerCamelCase : Optional[Any] = idalabel
lowerCamelCase : str = {v: k for k, v in idalabel.items()}
# load image processor
lowerCamelCase : Tuple = "coco_panoptic" if is_panoptic else "coco_detection"
lowerCamelCase : List[str] = ConditionalDetrImageProcessor(format=_SCREAMING_SNAKE_CASE )
# prepare image
lowerCamelCase : List[str] = prepare_img()
lowerCamelCase : Union[str, Any] = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors="pt" )
lowerCamelCase : int = encoding["pixel_values"]
logger.info(f'''Converting model {model_name}...''' )
# load original model from torch hub
lowerCamelCase : Tuple = torch.hub.load("DeppMeng/ConditionalDETR" ,_SCREAMING_SNAKE_CASE ,pretrained=_SCREAMING_SNAKE_CASE ).eval()
lowerCamelCase : List[Any] = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
lowerCamelCase : Optional[Any] = "conditional_detr." + src
rename_key(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
lowerCamelCase : Dict = rename_backbone_keys(_SCREAMING_SNAKE_CASE )
# query, key and value matrices need special treatment
read_in_q_k_v(_SCREAMING_SNAKE_CASE ,is_panoptic=_SCREAMING_SNAKE_CASE )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowerCamelCase : Optional[int] = "conditional_detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("conditional_detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
lowerCamelCase : Dict = state_dict.pop(_SCREAMING_SNAKE_CASE )
lowerCamelCase : int = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
lowerCamelCase : Optional[int] = state_dict.pop(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Dict = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
lowerCamelCase : str = state_dict.pop(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Dict = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
lowerCamelCase : Dict = state_dict.pop(_SCREAMING_SNAKE_CASE )
lowerCamelCase : List[str] = val
# finally, create HuggingFace model and load state dict
lowerCamelCase : List[str] = ConditionalDetrForSegmentation(_SCREAMING_SNAKE_CASE ) if is_panoptic else ConditionalDetrForObjectDetection(_SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
model.eval()
model.push_to_hub(repo_id=_SCREAMING_SNAKE_CASE ,organization="DepuMeng" ,commit_message="Add model" )
# verify our conversion
lowerCamelCase : Any = conditional_detr(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
assert torch.allclose(outputs.logits ,original_outputs["pred_logits"] ,atol=1e-4 )
assert torch.allclose(outputs.pred_boxes ,original_outputs["pred_boxes"] ,atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks ,original_outputs["pred_masks"] ,atol=1e-4 )
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='conditional_detr_resnet50',
type=str,
help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 48
|
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data ) -> tuple:
    # Split the sklearn Bunch into features and target
    return (data["data"], data["target"])


def xgboost(features ,target ,test_features ) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0 ,random_state=42 )
    xgb.fit(features ,target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ) ,1 )
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data , target = data_handling(california )
    x_train , x_test , y_train , y_test = train_test_split(
        data ,target ,test_size=0.25 ,random_state=1 )
    predictions = xgboost(x_train ,y_train ,x_test )
    # Error printing
    print(f'''Mean Absolute Error : {mean_absolute_error(y_test ,predictions )}''' )
    print(f'''Mean Square Error : {mean_squared_error(y_test ,predictions )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
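# Illustrative note (not part of the original script): fetch_california_housing()
# returns a Bunch whose "data" field has shape (20640, 8), so the test_size=0.25
# split above evaluates the regressor on roughly 5160 held-out rows.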
| 48
| 1
|
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ (TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = SpeechaTextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def snake_case_ ( self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ['<s>', '<pad>', '</s>', '<unk>']
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab , range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES['vocab_file'])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES['spm_file'])

        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def snake_case_ ( self):
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token) , token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id) , token)
    def snake_case_ ( self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0] , '<s>')
        self.assertEqual(vocab_keys[1] , '<pad>')
        self.assertEqual(vocab_keys[-1] , 'j')
        self.assertEqual(len(vocab_keys) , 1001)
def snake_case_ ( self):
self.assertEqual(self.get_tokenizer().vocab_size , 1001)
    def snake_case_ ( self):
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [289, 50, 14, 174, 386] , )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def snake_case_ ( self):
# fmt: off
lowercase__ : Any = {'input_ids': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class SCREAMING_SNAKE_CASE__ (unittest.TestCase ):
    checkpoint_name = """valhalla/s2t_mustc_multilinguial_medium"""
    french_text = """C'est trop cool"""
    spanish_text = """Esto es genial"""
    @classmethod
    def snake_case_ ( cls):
        cls.tokenizer: SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
def snake_case_ ( self):
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4)
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6)
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9)
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11)
def snake_case_ ( self):
self.assertEqual(self.tokenizer.vocab_size , 1_0000)
    def snake_case_ ( self):
        self.assertIn(ES_CODE , self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True)
        self.assertEqual(result , expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token , result)
    def snake_case_ ( self):
        self.tokenizer.tgt_lang = 'fr'
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0] , FR_CODE)
        self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id)
    def snake_case_ ( self):
        self.tokenizer.tgt_lang = 'fr'
        self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE])

        self.tokenizer.tgt_lang = 'es'
        self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE])
| 365
|
import math
import sys
def snake_case__ ( number : int ):
    '''simple docstring'''
    if number != int(number ):
        raise ValueError('the value of input must be a natural number' )
    if number < 0:
        raise ValueError('the value of input must not be a negative number' )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1 , root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer , current_answer )
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
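# Illustrative worked examples (not part of the original module): the function
# returns the minimum number of perfect squares summing to the input, e.g.
# 12 -> 3 (4 + 4 + 4), 25 -> 1 (25 itself), and 13 -> 2 (4 + 9).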
| 216
| 0
|
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30_522, type=int)
    args = parser.parse_args()

    logger.info(f"""Loading data from {args.data_file}""")
    with open(args.data_file, """rb""") as fp:
        data = pickle.load(fp)

    logger.info("""Counting occurrences for MLM.""")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v

    logger.info(f"""Dump to {args.token_counts_dump}""")
    with open(args.token_counts_dump, """wb""") as handle:
        pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 58
|
'''simple docstring'''
from __future__ import annotations
import requests
def get_hackernews_story( story_id : str ) -> dict:
    '''simple docstring'''
    url = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
    return requests.get(url ).json()


def hackernews_top_stories( max_stories : int = 1_0 ) -> list[dict]:
    '''simple docstring'''
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url ).json()[:max_stories]
    return [get_hackernews_story(story_id ) for story_id in story_ids]


def hackernews_top_stories_as_markdown( max_stories : int = 1_0 ) -> str:
    '''simple docstring'''
    stories = hackernews_top_stories(max_stories )
    return "\n".join("* [{title}]({url})".format(**story ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
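# Illustrative note (not part of the original script): the markdown helper relies
# on each story dict carrying "title" and "url" keys, rendering lines such as
# "* [Some headline](https://example.com/article)" (hypothetical output).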
| 85
| 0
|
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = 'src/diffusers'
REPO_PATH = '.'

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'diffusers',
    os.path.join(DIFFUSERS_PATH, '__init__.py'),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue( line , indent ):
    """simple docstring"""
    return line.startswith(indent ) or len(line ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , line ) is not None
def find_code_in_diffusers( object_name ):
    """simple docstring"""
    parts = object_name.split(""".""" )
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH , f"""{module}.py""" ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module , parts[i] )
    if i >= len(parts ):
        raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )

    with open(os.path.join(DIFFUSERS_PATH , f"""{module}.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = """"""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines ):
        raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index] , indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines )
_re_copy_warning = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_re_replace_pattern = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
_re_fill_pattern = re.compile(R'<FILL\s+[^>]*>')
def get_indent( code ):
    """simple docstring"""
    lines = code.split("""\n""" )
    idx = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(R"""^(\s*)\S""" , lines[idx] ).groups()[0]
    return ""
def blackify( code ):
    """simple docstring"""
    has_indent = len(get_indent(code ) ) > 0
    if has_indent:
        code = f"""class Bla:\n{code}"""
    mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=True )
    result = black.format_str(code , mode=mode )
    result , _ = style_docstrings_in_code(result )
    return result[len("""class Bla:\n""" ) :] if has_indent else result
def is_copy_consistent( filename , overwrite=False ):
    """simple docstring"""
    with open(filename , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines ):
        search = _re_copy_warning.search(lines[line_index] )
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent , object_name , replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name )
        theoretical_indent = get_indent(theoretical_code )

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines ) and should_continue:
            line_index += 1
            if line_index >= len(lines ):
                break
            line = lines[line_index]
            should_continue = _should_continue(line , indent ) and re.search(f"""^{indent}# End copy""" , line ) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1] ) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = """""".join(observed_code_lines )

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(line ) is None]
        theoretical_code = """\n""".join(theoretical_code )

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern ) > 0:
            patterns = replace_pattern.replace("""with""" , """""" ).split(""",""" )
            patterns = [_re_replace_pattern.search(p ) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1 , obj2 , option = pattern.groups()
                theoretical_code = re.sub(obj1 , obj2 , theoretical_code )
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower() , obj2.lower() , theoretical_code )
                    theoretical_code = re.sub(obj1.upper() , obj2.upper() , theoretical_code )

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code )
            theoretical_code = theoretical_code[len(lines[start_index - 1] ) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index] )
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs ) > 0:
        # Warn the user a file has been modified.
        print(f"""Detected changes, rewriting {filename}.""" )
        with open(filename , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
            f.writelines(lines )

    return diffs
def check_copies( overwrite = False ):
    """simple docstring"""
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH , """**/*.py""" ) , recursive=True )
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename , overwrite )
        diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
    if not overwrite and len(diffs ) > 0:
        diff = """\n""".join(diffs )
        raise Exception(
            """Found the following copy inconsistencies:\n"""
            + diff
            + """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 337
|
'''simple docstring'''
def validate_initial_digits( credit_card_number ) ->bool:
    """simple docstring"""
    return credit_card_number.startswith(("""34""", """35""", """37""", """4""", """5""", """6""") )


def luhn_validation( credit_card_number ) ->bool:
    """simple docstring"""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len , -1 , -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9(e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1 , -1 , -2 ):
        total += int(cc_number[i] )
    return total % 10 == 0


def validate_credit_card_number( credit_card_number ) ->bool:
    """simple docstring"""
    error_message = f"""{credit_card_number} is an invalid credit card number because"""
    if not credit_card_number.isdigit():
        print(f"""{error_message} it has nonnumerical characters.""" )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(f"""{error_message} of its length.""" )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(f"""{error_message} of its first two digits.""" )
        return False
    if not luhn_validation(credit_card_number ):
        print(f"""{error_message} it fails the Luhn check.""" )
        return False
    print(f"""{credit_card_number} is a valid credit card number.""" )
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
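# Illustrative worked example (not part of the original module): for the classic
# test number 4111111111111111, doubling every second digit from the right turns
# the leading 4 into 8 and seven of the 1s into 2s, giving a checksum of
# 8 + 7*2 + 8*1 = 30, and 30 % 10 == 0, so the Luhn check passes.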
| 337
| 1
|
'''simple docstring'''
class SubArray :
    def __init__( self , arr ):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(''',''' )

    def solve_sub_array( self ):
        rear = [int(self.array[0] )] * len(self.array )
        sum_value = [int(self.array[0] )] * len(self.array )
        for i in range(1 , len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
            rear[i] = max(sum_value[i] , rear[i - 1] )
        return rear[len(self.array ) - 1]


if __name__ == "__main__":
    whole_array = input('please input some numbers:')
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(('the results is:', re))
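# Illustrative note (not part of the original file): this is Kadane's algorithm
# for the maximum contiguous subarray sum, e.g. the input "1,-2,3,4" yields 7
# (the subarray [3, 4]).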
| 56
|
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class a ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class = AutoencoderKL
    main_input_name = "sample"
    base_precision = 1e-2
@property
    def dummy_input( self ):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )

        return {"sample": image}
@property
def A_ ( self : List[Any] ):
return (3, 32, 32)
@property
def A_ ( self : Dict ):
return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common( self ):
        init_dict = {
            '''block_out_channels''': [32, 64],
            '''in_channels''': 3,
            '''out_channels''': 3,
            '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            '''latent_channels''': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
def A_ ( self : Any ):
pass
def A_ ( self : str ):
pass
@unittest.skipIf(torch_device == '''mps''' , '''Gradient checkpointing skipped on MPS''' )
def A_ ( self : Dict ):
        # enable deterministic behavior for gradient checkpointing
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        model.to(torch_device )

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out )
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict )
        # clone model
        model_2.load_state_dict(model.state_dict() )
        model_2.to(torch_device )
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5 )

        named_params = dict(model.named_parameters() )
        named_params_2 = dict(model_2.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_2[name].grad.data , atol=5e-5 ) )
def A_ ( self : Tuple ):
        model , loading_info = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['''missing_keys'''] ) , 0 )

        model.to(torch_device )
        image = model(**self.dummy_input )

        assert image is not None, "Make sure output is not None"
def A_ ( self : Tuple ):
        model = AutoencoderKL.from_pretrained('''fusing/autoencoder-kl-dummy''' )
        model = model.to(torch_device )
        model.eval()

        if torch_device == "mps":
            generator = torch.manual_seed(0 )
        else:
            generator = torch.Generator(device=torch_device ).manual_seed(0 )

        image = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image , sample_posterior=True , generator=generator ).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
                [
                    -4.0_078e-01,
                    -3.8_323e-04,
                    -1.2_681e-01,
                    -1.1_462e-01,
                    2.0_095e-01,
                    1.0_893e-01,
                    -8.8_247e-02,
                    -3.0_361e-01,
                    -9.8_644e-03,
                ] )
        elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )

        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2 ) )
@slow
class a ( unittest.TestCase ):
    def get_file_format( self , seed , shape ):
        return F"gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy"
def A_ ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_sd_image( self , seed=0 , shape=(4, 3, 512, 512) , fp16=False ):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed , shape ) ) ).to(torch_device ).to(dtype )
        return image

    def get_sd_vae_model( self , model_id="CompVis/stable-diffusion-v1-4" , fp16=False ):
        revision = '''fp16''' if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = AutoencoderKL.from_pretrained(
            model_id , subfolder='''vae''' , torch_dtype=torch_dtype , revision=revision , )
        model.to(torch_device ).eval()

        return model

    def get_generator( self , seed=0 ):
        if torch_device == "mps":
            return torch.manual_seed(seed )
        return torch.Generator(device=torch_device ).manual_seed(seed )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def A_ ( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Tuple ):
snake_case_ = self.get_sd_vae_model()
snake_case_ = self.get_sd_image(lowercase_ )
snake_case_ = self.get_generator(lowercase_ )
with torch.no_grad():
snake_case_ = model(lowercase_ , generator=lowercase_ , sample_posterior=lowercase_ ).sample
assert sample.shape == image.shape
snake_case_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
snake_case_ = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(lowercase_ , lowercase_ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def A_ ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Dict ):
snake_case_ = self.get_sd_vae_model(fpaa=lowercase_ )
snake_case_ = self.get_sd_image(lowercase_ , fpaa=lowercase_ )
snake_case_ = self.get_generator(lowercase_ )
with torch.no_grad():
snake_case_ = model(lowercase_ , generator=lowercase_ , sample_posterior=lowercase_ ).sample
assert sample.shape == image.shape
snake_case_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
snake_case_ = torch.tensor(lowercase_ )
assert torch_all_close(lowercase_ , lowercase_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def A_ ( self : Tuple , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[int] ):
snake_case_ = self.get_sd_vae_model()
snake_case_ = self.get_sd_image(lowercase_ )
with torch.no_grad():
snake_case_ = model(lowercase_ ).sample
assert sample.shape == image.shape
snake_case_ = sample[-1, -2:, -2:, :2].flatten().float().cpu()
snake_case_ = torch.tensor(expected_slice_mps if torch_device == '''mps''' else expected_slice )
assert torch_all_close(lowercase_ , lowercase_ , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def A_ ( self : Dict , lowercase_ : Tuple , lowercase_ : Optional[int] ):
snake_case_ = self.get_sd_vae_model()
snake_case_ = self.get_sd_image(lowercase_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
snake_case_ = model.decode(lowercase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
snake_case_ = sample[-1, -2:, :2, -2:].flatten().cpu()
snake_case_ = torch.tensor(lowercase_ )
assert torch_all_close(lowercase_ , lowercase_ , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def A_ ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Optional[Any] ):
snake_case_ = self.get_sd_vae_model(fpaa=lowercase_ )
snake_case_ = self.get_sd_image(lowercase_ , shape=(3, 4, 64, 64) , fpaa=lowercase_ )
with torch.no_grad():
snake_case_ = model.decode(lowercase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
snake_case_ = sample[-1, -2:, :2, -2:].flatten().float().cpu()
snake_case_ = torch.tensor(lowercase_ )
assert torch_all_close(lowercase_ , lowercase_ , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def A_ ( self : Optional[Any] , lowercase_ : List[str] ):
snake_case_ = self.get_sd_vae_model(fpaa=lowercase_ )
snake_case_ = self.get_sd_image(lowercase_ , shape=(3, 4, 64, 64) , fpaa=lowercase_ )
with torch.no_grad():
snake_case_ = model.decode(lowercase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
snake_case_ = model.decode(lowercase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowercase_ , lowercase_ , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='''xformers is not required when using PyTorch 2.0.''' )
def A_ ( self : Optional[Any] , lowercase_ : Any ):
snake_case_ = self.get_sd_vae_model()
snake_case_ = self.get_sd_image(lowercase_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
snake_case_ = model.decode(lowercase_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
snake_case_ = model.decode(lowercase_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(lowercase_ , lowercase_ , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def A_ ( self : str , lowercase_ : Optional[int] , lowercase_ : Tuple ):
snake_case_ = self.get_sd_vae_model()
snake_case_ = self.get_sd_image(lowercase_ )
snake_case_ = self.get_generator(lowercase_ )
with torch.no_grad():
snake_case_ = model.encode(lowercase_ ).latent_dist
snake_case_ = dist.sample(generator=lowercase_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
snake_case_ = sample[0, -1, -3:, -3:].flatten().cpu()
snake_case_ = torch.tensor(lowercase_ )
snake_case_ = 3e-3 if torch_device != '''mps''' else 1e-2
assert torch_all_close(lowercase_ , lowercase_ , atol=lowercase_ )
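# --- Editor's note: a minimal, self-contained sketch of the AutoencoderKL
# encode/decode API the tests above exercise. The checkpoint name mirrors
# get_sd_vae_model(); it and the helper name are illustrative assumptions, and
# this function is not part of the original test suite. ---
def _vae_roundtrip_sketch():
    import torch
    from diffusers import AutoencoderKL

    vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae").eval()
    image = torch.randn(1, 3, 512, 512)
    with torch.no_grad():
        # encode() returns a posterior distribution; sampling it yields latents
        # at 1/8 spatial resolution with 4 channels, matching the shape checks above.
        latents = vae.encode(image).latent_dist.sample()  # (1, 4, 64, 64)
        reconstruction = vae.decode(latents).sample  # (1, 3, 512, 512)
    return reconstruction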
| 56
| 1
|
"""simple docstring"""
from __future__ import annotations
from random import random
class Node:
    """
    Treap's node: a treap is a binary search tree by value and a heap by priority.
    """

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat(
                {f"{self.value}: {self.prior:.5}": (self.left, self.right)} , indent=1)

    def __str__(self):
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into (values <= value, values > value) subtrees."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left , value )
            return left, root
        else:
            root.right, right = split(root.right , value )
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps where every value in `left` precedes those in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right , right )
        return left
    else:
        right.left = merge(left , right.left )
        return right


def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value )
    left, right = split(root , value )
    return merge(merge(left , node ) , right )


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root , value - 1 )
    _, right = split(right , value )
    return merge(left , right )


def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left )
        print(root.value , end=""",""" )
        inorder(root.right )


def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root , int(arg[1:] ) )
        else:
            print("""Unknown command""" )
    return root


def main() -> None:
    root = None
    print(
        """enter numbers to create a tree, + value to add value into treap, """
        """- value to erase all nodes with value. \'q\' to quit. """ )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print("""good bye!""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
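# --- Editor's note: a small programmatic example of the treap API above
# (insert / erase / inorder); the values and the name `_treap_demo` are
# illustrative only. ---
def _treap_demo() -> None:
    demo_root = None
    for x in (5, 3, 8, 1):
        demo_root = insert(demo_root, x)
    demo_root = erase(demo_root, 3)
    inorder(demo_root)  # prints the remaining values in sorted order: 1,5,8,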
| 357
|
"""simple docstring"""
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
"num_train_timesteps": 40,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"num_train_timesteps": 151,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
def str2bool(v ):
if isinstance(v , bool ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("""boolean value expected""" )
def convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=False ):
__SCREAMING_SNAKE_CASE = checkpoint[f"{old_prefix}.in_layers.0.weight"]
__SCREAMING_SNAKE_CASE = checkpoint[f"{old_prefix}.in_layers.0.bias"]
__SCREAMING_SNAKE_CASE = checkpoint[f"{old_prefix}.in_layers.2.weight"]
__SCREAMING_SNAKE_CASE = checkpoint[f"{old_prefix}.in_layers.2.bias"]
__SCREAMING_SNAKE_CASE = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
__SCREAMING_SNAKE_CASE = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
__SCREAMING_SNAKE_CASE = checkpoint[f"{old_prefix}.out_layers.0.weight"]
__SCREAMING_SNAKE_CASE = checkpoint[f"{old_prefix}.out_layers.0.bias"]
__SCREAMING_SNAKE_CASE = checkpoint[f"{old_prefix}.out_layers.3.weight"]
__SCREAMING_SNAKE_CASE = checkpoint[f"{old_prefix}.out_layers.3.bias"]
if has_skip:
__SCREAMING_SNAKE_CASE = checkpoint[f"{old_prefix}.skip_connection.weight"]
__SCREAMING_SNAKE_CASE = checkpoint[f"{old_prefix}.skip_connection.bias"]
return new_checkpoint
def convert_attention(checkpoint , new_checkpoint , old_prefix , new_prefix , attention_dim=None ):
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3 , dim=0 )
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3 , dim=0 )
__SCREAMING_SNAKE_CASE = checkpoint[f"{old_prefix}.norm.weight"]
__SCREAMING_SNAKE_CASE = checkpoint[f"{old_prefix}.norm.bias"]
__SCREAMING_SNAKE_CASE = weight_q.squeeze(-1 ).squeeze(-1 )
__SCREAMING_SNAKE_CASE = bias_q.squeeze(-1 ).squeeze(-1 )
__SCREAMING_SNAKE_CASE = weight_k.squeeze(-1 ).squeeze(-1 )
__SCREAMING_SNAKE_CASE = bias_k.squeeze(-1 ).squeeze(-1 )
__SCREAMING_SNAKE_CASE = weight_v.squeeze(-1 ).squeeze(-1 )
__SCREAMING_SNAKE_CASE = bias_v.squeeze(-1 ).squeeze(-1 )
__SCREAMING_SNAKE_CASE = (
checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1 ).squeeze(-1 )
)
__SCREAMING_SNAKE_CASE = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def con_pt_to_diffuser(checkpoint_path , unet_config ):
checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
new_checkpoint = {}
__SCREAMING_SNAKE_CASE = checkpoint["""time_embed.0.weight"""]
__SCREAMING_SNAKE_CASE = checkpoint["""time_embed.0.bias"""]
__SCREAMING_SNAKE_CASE = checkpoint["""time_embed.2.weight"""]
__SCREAMING_SNAKE_CASE = checkpoint["""time_embed.2.bias"""]
if unet_config["num_class_embeds"] is not None:
__SCREAMING_SNAKE_CASE = checkpoint["""label_emb.weight"""]
__SCREAMING_SNAKE_CASE = checkpoint["""input_blocks.0.0.weight"""]
__SCREAMING_SNAKE_CASE = checkpoint["""input_blocks.0.0.bias"""]
__SCREAMING_SNAKE_CASE = unet_config["""down_block_types"""]
__SCREAMING_SNAKE_CASE = unet_config["""layers_per_block"""]
__SCREAMING_SNAKE_CASE = unet_config["""attention_head_dim"""]
__SCREAMING_SNAKE_CASE = unet_config["""block_out_channels"""]
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = channels_list[0]
for i, layer_type in enumerate(UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = channels_list[i]
__SCREAMING_SNAKE_CASE = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(layers_per_block ):
__SCREAMING_SNAKE_CASE = f"down_blocks.{i}.resnets.{j}"
__SCREAMING_SNAKE_CASE = f"input_blocks.{current_layer}.0"
__SCREAMING_SNAKE_CASE = True if j == 0 and downsample_block_has_skip else False
__SCREAMING_SNAKE_CASE = convert_resnet(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , has_skip=UpperCamelCase_ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(layers_per_block ):
__SCREAMING_SNAKE_CASE = f"down_blocks.{i}.resnets.{j}"
__SCREAMING_SNAKE_CASE = f"input_blocks.{current_layer}.0"
__SCREAMING_SNAKE_CASE = True if j == 0 and downsample_block_has_skip else False
__SCREAMING_SNAKE_CASE = convert_resnet(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , has_skip=UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = f"down_blocks.{i}.attentions.{j}"
__SCREAMING_SNAKE_CASE = f"input_blocks.{current_layer}.1"
__SCREAMING_SNAKE_CASE = convert_attention(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
current_layer += 1
if i != len(down_block_types ) - 1:
__SCREAMING_SNAKE_CASE = f"down_blocks.{i}.downsamplers.0"
__SCREAMING_SNAKE_CASE = f"input_blocks.{current_layer}.0"
__SCREAMING_SNAKE_CASE = convert_resnet(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
current_layer += 1
prev_channels = current_channels
# hardcoded the mid-block for now
__SCREAMING_SNAKE_CASE = """mid_block.resnets.0"""
__SCREAMING_SNAKE_CASE = """middle_block.0"""
__SCREAMING_SNAKE_CASE = convert_resnet(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = """mid_block.attentions.0"""
__SCREAMING_SNAKE_CASE = """middle_block.1"""
__SCREAMING_SNAKE_CASE = convert_attention(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = """mid_block.resnets.1"""
__SCREAMING_SNAKE_CASE = """middle_block.2"""
__SCREAMING_SNAKE_CASE = convert_resnet(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
current_layer = 0
up_block_types = unet_config["""up_block_types"""]
for i, layer_type in enumerate(up_block_types ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
__SCREAMING_SNAKE_CASE = f"up_blocks.{i}.resnets.{j}"
__SCREAMING_SNAKE_CASE = f"output_blocks.{current_layer}.0"
__SCREAMING_SNAKE_CASE = convert_resnet(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , has_skip=UpperCamelCase_ )
current_layer += 1
if i != len(up_block_types ) - 1:
__SCREAMING_SNAKE_CASE = f"up_blocks.{i}.upsamplers.0"
__SCREAMING_SNAKE_CASE = f"output_blocks.{current_layer-1}.1"
__SCREAMING_SNAKE_CASE = convert_resnet(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
__SCREAMING_SNAKE_CASE = f"up_blocks.{i}.resnets.{j}"
__SCREAMING_SNAKE_CASE = f"output_blocks.{current_layer}.0"
__SCREAMING_SNAKE_CASE = convert_resnet(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , has_skip=UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = f"up_blocks.{i}.attentions.{j}"
__SCREAMING_SNAKE_CASE = f"output_blocks.{current_layer}.1"
__SCREAMING_SNAKE_CASE = convert_attention(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
current_layer += 1
if i != len(up_block_types ) - 1:
__SCREAMING_SNAKE_CASE = f"up_blocks.{i}.upsamplers.0"
__SCREAMING_SNAKE_CASE = f"output_blocks.{current_layer-1}.2"
__SCREAMING_SNAKE_CASE = convert_resnet(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = checkpoint["""out.0.weight"""]
__SCREAMING_SNAKE_CASE = checkpoint["""out.0.bias"""]
__SCREAMING_SNAKE_CASE = checkpoint["""out.2.weight"""]
__SCREAMING_SNAKE_CASE = checkpoint["""out.2.bias"""]
return new_checkpoint
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
)
parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
args = parser.parse_args()
args.class_cond = str2bool(args.class_cond)
ckpt_name = os.path.basename(args.unet_path)
print(F"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
unet_config = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
unet_config = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
unet_config = TEST_UNET_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
unet_config["num_class_embeds"] = None
converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
image_unet = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
scheduler_config = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(F"""Checkpoint type {ckpt_name} is not currently supported.""")
cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
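# --- Editor's note: example invocation; the checkpoint filename drives the
# config routing above, and both paths are hypothetical:
#   python convert_consistency_to_diffusers.py \
#       --unet_path cd_imagenet64_l2.pt \
#       --dump_path ./consistency-model-imagenet64 \
#       --class_cond True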
| 255
| 0
|
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def __lowerCAmelCase (_UpperCamelCase = "laptop" ):
__lowerCAmelCase : Any = F"https://www.amazon.in/laptop/s?k={product}"
__lowerCAmelCase : Dict = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
soup = BeautifulSoup(requests.get(url , headers=header ).text )
# Initialize a Pandas dataframe with the column titles
data_frame = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
product_title = item.h2.text
product_link = 'https://www.amazon.in/' + item.h2.a['href']
product_price = item.find('span' , attrs={'class': 'a-offscreen'} ).text
try:
product_rating = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
except AttributeError:
product_rating = 'Not available'
try:
product_mrp = (
'₹'
+ item.find(
'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
)
except AttributeError:
product_mrp = ''
try:
discount = float(
(
(
float(product_mrp.strip('₹' ).replace(',' , '' ) )
- float(product_price.strip('₹' ).replace(',' , '' ) )
)
/ float(product_mrp.strip('₹' ).replace(',' , '' ) )
)
* 100 )
except ValueError:
discount = float('nan' )
except AttributeError:
pass
__lowerCAmelCase : str = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
__lowerCAmelCase : List[Any] = ' '
__lowerCAmelCase : Tuple = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
lowerCamelCase__ = """headphones"""
get_amazon_product_data(product).to_csv(f'Amazon Product Data for {product}.csv')
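# --- Editor's note: the Discount column above is (MRP - price) / MRP * 100.
# Worked example: MRP 1000 and price 750 give (1000 - 750) / 1000 * 100 = 25.0,
# i.e. a 25% discount.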
| 86
|
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : Union[str, Any] = batch_size
__lowerCAmelCase : Dict = seq_length
__lowerCAmelCase : Dict = is_training
__lowerCAmelCase : List[str] = use_input_mask
__lowerCAmelCase : int = use_token_type_ids
__lowerCAmelCase : Optional[int] = use_labels
__lowerCAmelCase : List[Any] = vocab_size
__lowerCAmelCase : Dict = hidden_size
__lowerCAmelCase : Tuple = embedding_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = intermediate_size
__lowerCAmelCase : Optional[Any] = hidden_act
__lowerCAmelCase : Optional[int] = hidden_dropout_prob
__lowerCAmelCase : Dict = attention_probs_dropout_prob
__lowerCAmelCase : Any = max_position_embeddings
__lowerCAmelCase : Any = type_vocab_size
__lowerCAmelCase : Union[str, Any] = type_sequence_label_size
__lowerCAmelCase : List[str] = initializer_range
__lowerCAmelCase : str = num_labels
__lowerCAmelCase : int = num_choices
__lowerCAmelCase : Union[str, Any] = scope
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
__lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : str = None
if self.use_token_type_ids:
__lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : Union[str, Any] = None
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = MobileBertModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = MobileBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = MobileBertForNextSentencePrediction(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Dict = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = MobileBertForPreTraining(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[Any] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , next_sentence_label=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = MobileBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.num_labels
__lowerCAmelCase : Tuple = MobileBertForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = self.num_labels
__lowerCAmelCase : int = MobileBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.num_choices
__lowerCAmelCase : List[str] = MobileBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self ):
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : str = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Dict = True
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
__lowerCAmelCase : List[str] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
return inputs_dict
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = MobileBertModelTester(self )
__lowerCAmelCase : str = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_SCREAMING_SNAKE_CASE )
def _long_tensor(tok_lst ):
return torch.tensor(
tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Dict = torch.Size((1, 9, 5_12) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor(
[
[
[-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05],
[-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00],
[2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01],
]
] , device=_SCREAMING_SNAKE_CASE , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
__lowerCAmelCase : Tuple = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
__lowerCAmelCase : Union[str, Any] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
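# --- Editor's note: the ratio-bound comparison above, factored into a
# standalone sketch (the helper name and defaults are illustrative):
def _ratio_close(expected, actual, tolerance=1e-3):
    import torch

    # Holds when every element of expected / actual lies in [1 - tol, 1 + tol],
    # which stays meaningful across the ~10 orders of magnitude MobileBERT
    # outputs span, unlike an absolute-difference check.
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tolerance) and torch.all(ratio <= 1 + tolerance))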
| 86
| 1
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _lowercase ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowercase_ = AltDiffusionPipeline
lowercase_ = TEXT_TO_IMAGE_PARAMS
lowercase_ = TEXT_TO_IMAGE_BATCH_PARAMS
lowercase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowercase_ = TEXT_TO_IMAGE_IMAGE_PARAMS
def _UpperCamelCase ( self ) -> int:
torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowerCamelCase : str = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=UpperCAmelCase_ , set_alpha_to_one=UpperCAmelCase_ , )
torch.manual_seed(0 )
lowerCamelCase : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
# TODO: address the non-deterministic text encoder (fails for save-load tests)
# torch.manual_seed(0)
# text_encoder_config = RobertaSeriesConfig(
# hidden_size=32,
# project_dim=32,
# intermediate_size=37,
# layer_norm_eps=1e-05,
# num_attention_heads=4,
# num_hidden_layers=5,
# vocab_size=5002,
# )
# text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
torch.manual_seed(0 )
lowerCamelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5002 , )
lowerCamelCase : int = CLIPTextModel(UpperCAmelCase_ )
lowerCamelCase : Union[str, Any] = XLMRobertaTokenizer.from_pretrained('hf-internal-testing/tiny-xlm-roberta' )
lowerCamelCase : List[Any] = 77
lowerCamelCase : Optional[int] = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_=0 ) -> str:
if str(UpperCAmelCase_ ).startswith('mps' ):
lowerCamelCase : str = torch.manual_seed(UpperCAmelCase_ )
else:
lowerCamelCase : Optional[int] = torch.Generator(device=UpperCAmelCase_ ).manual_seed(UpperCAmelCase_ )
lowerCamelCase : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def _UpperCamelCase ( self ) -> int:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def _UpperCamelCase ( self ) -> List[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def _UpperCamelCase ( self ) -> Optional[int]:
lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : str = self.get_dummy_components()
torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCamelCase : Optional[Any] = RobertaSeriesModelWithTransformation(UpperCAmelCase_ )
lowerCamelCase : List[str] = text_encoder
lowerCamelCase : str = AltDiffusionPipeline(**UpperCAmelCase_ )
lowerCamelCase : List[Any] = alt_pipe.to(UpperCAmelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(UpperCAmelCase_ )
lowerCamelCase : str = 'A photo of an astronaut'
lowerCamelCase : Any = alt_pipe(**UpperCAmelCase_ )
lowerCamelCase : str = output.images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase : Union[str, Any] = np.array(
[0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCamelCase ( self ) -> List[str]:
lowerCamelCase : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCamelCase : Dict = self.get_dummy_components()
lowerCamelCase : str = PNDMScheduler(skip_prk_steps=UpperCAmelCase_ )
torch.manual_seed(0 )
lowerCamelCase : Union[str, Any] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=5002 , )
# TODO: remove after fixing the non-deterministic text encoder
lowerCamelCase : int = RobertaSeriesModelWithTransformation(UpperCAmelCase_ )
lowerCamelCase : List[Any] = text_encoder
lowerCamelCase : Union[str, Any] = AltDiffusionPipeline(**UpperCAmelCase_ )
lowerCamelCase : Any = alt_pipe.to(UpperCAmelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCamelCase : str = self.get_dummy_inputs(UpperCAmelCase_ )
lowerCamelCase : Optional[int] = alt_pipe(**UpperCAmelCase_ )
lowerCamelCase : Any = output.images
lowerCamelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase : str = np.array(
[0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
def _UpperCamelCase ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self ) -> List[Any]:
# make sure here that pndm scheduler skips prk
lowerCamelCase : Any = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , safety_checker=UpperCAmelCase_ )
lowerCamelCase : str = alt_pipe.to(UpperCAmelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCamelCase : List[str] = 'A painting of a squirrel eating a burger'
lowerCamelCase : List[str] = torch.manual_seed(0 )
lowerCamelCase : List[Any] = alt_pipe([prompt] , generator=UpperCAmelCase_ , guidance_scale=6.0 , num_inference_steps=20 , output_type='np' )
lowerCamelCase : Optional[Any] = output.images
lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase : int = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _UpperCamelCase ( self ) -> Dict:
lowerCamelCase : Optional[int] = DDIMScheduler.from_pretrained('BAAI/AltDiffusion' , subfolder='scheduler' )
lowerCamelCase : Tuple = AltDiffusionPipeline.from_pretrained('BAAI/AltDiffusion' , scheduler=UpperCAmelCase_ , safety_checker=UpperCAmelCase_ )
lowerCamelCase : Dict = alt_pipe.to(UpperCAmelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCAmelCase_ )
lowerCamelCase : int = 'A painting of a squirrel eating a burger'
lowerCamelCase : str = torch.manual_seed(0 )
lowerCamelCase : List[str] = alt_pipe([prompt] , generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='numpy' )
lowerCamelCase : Optional[int] = output.images
lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowerCamelCase : Optional[int] = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 357
|
"""simple docstring"""
def solution(n: int = 10) -> str:
    """Return the last n digits of 28433 * 2**7830457 + 1."""
    if not isinstance(n, int) or n < 0:
        raise ValueError('Invalid input' )
    modulus = 10**n
    # pow with a third argument computes the power modulo `modulus` efficiently.
    number = 2_8433 * pow(2, 783_0457, modulus) + 1
    return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(1_0) = }""")
| 205
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
_a = logging.get_logger(__name__)
if is_vision_available():
import PIL
class _UpperCAmelCase( __lowercase ):
lowercase__ = ["pixel_values"]
def __init__( self , __a = True , __a = None , __a = PILImageResampling.BICUBIC , __a = True , __a = None , __a = True , __a = 1 / 2_55 , __a = True , __a = None , __a = None , __a = True , **__a , ) -> List[str]:
'''simple docstring'''
super().__init__(**snake_case_)
_UpperCamelCase = size if size is not None else {'''shortest_edge''': 2_24}
_UpperCamelCase = get_size_dict(snake_case_ , default_to_square=snake_case_)
_UpperCamelCase = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
_UpperCamelCase = get_size_dict(snake_case_ , default_to_square=snake_case_ , param_name='''crop_size''')
_UpperCamelCase = do_resize
_UpperCamelCase = size
_UpperCamelCase = resample
_UpperCamelCase = do_center_crop
_UpperCamelCase = crop_size
_UpperCamelCase = do_rescale
_UpperCamelCase = rescale_factor
_UpperCamelCase = do_normalize
_UpperCamelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_UpperCamelCase = image_std if image_std is not None else OPENAI_CLIP_STD
_UpperCamelCase = do_convert_rgb
def UpperCAmelCase ( self , __a , __a , __a = PILImageResampling.BICUBIC , __a = None , **__a , ) -> Dict:
'''simple docstring'''
_UpperCamelCase = get_size_dict(snake_case_ , default_to_square=snake_case_)
if "shortest_edge" not in size:
raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
_UpperCamelCase = get_resize_output_image_size(snake_case_ , size=size['''shortest_edge'''] , default_to_square=snake_case_)
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_)
def UpperCAmelCase ( self , __a , __a , __a = None , **__a , ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = get_size_dict(snake_case_)
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
return center_crop(snake_case_ , size=(size['''height'''], size['''width''']) , data_format=snake_case_ , **snake_case_)
def UpperCAmelCase ( self , __a , __a , __a = None , **__a , ) -> List[str]:
'''simple docstring'''
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_)
def UpperCAmelCase ( self , __a , __a , __a , __a = None , **__a , ) -> Union[str, Any]:
'''simple docstring'''
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_)
def UpperCAmelCase ( self , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = None , __a = ChannelDimension.FIRST , **__a , ) -> Optional[int]:
'''simple docstring'''
_UpperCamelCase = do_resize if do_resize is not None else self.do_resize
_UpperCamelCase = size if size is not None else self.size
_UpperCamelCase = get_size_dict(snake_case_ , param_name='''size''' , default_to_square=snake_case_)
_UpperCamelCase = resample if resample is not None else self.resample
_UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCamelCase = crop_size if crop_size is not None else self.crop_size
_UpperCamelCase = get_size_dict(snake_case_ , param_name='''crop_size''' , default_to_square=snake_case_)
_UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCamelCase = image_mean if image_mean is not None else self.image_mean
_UpperCamelCase = image_std if image_std is not None else self.image_std
_UpperCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_UpperCamelCase = make_list_of_images(snake_case_)
if not valid_images(snake_case_):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''')
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''')
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''')
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''')
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_UpperCamelCase = [convert_to_rgb(snake_case_) for image in images]
# All transformations expect numpy arrays.
_UpperCamelCase = [to_numpy_array(snake_case_) for image in images]
if do_resize:
_UpperCamelCase = [self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_) for image in images]
if do_center_crop:
_UpperCamelCase = [self.center_crop(image=snake_case_ , size=snake_case_) for image in images]
if do_rescale:
_UpperCamelCase = [self.rescale(image=snake_case_ , scale=snake_case_) for image in images]
if do_normalize:
_UpperCamelCase = [self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_) for image in images]
_UpperCamelCase = [to_channel_dimension_format(snake_case_ , snake_case_) for image in images]
_UpperCamelCase = {'''pixel_values''': images}
return BatchFeature(data=snake_case_ , tensor_type=snake_case_)
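# --- Editor's note: with the defaults above, preprocess() applies, in order,
# convert_to_rgb -> resize(shortest_edge=224) -> center_crop(224x224)
# -> rescale(1/255) -> normalize(OPENAI_CLIP_MEAN/STD) -> channels-first.
# Worked shape example: a 640x480 RGB image is resized so its short side (480)
# becomes 224 (the long side lands near 299), then center-cropped, giving
# pixel_values of shape (1, 3, 224, 224) for a single image.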
| 194
|
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
lowercase__ ={
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
ETAOIN = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message )
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
        freq_to_letter_str[freq] = ''''''.join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero , reverse=True )
    freq_order: list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order )


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message )
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
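# --- Editor's note: typical use of the module above. english_freq_match_score()
# ranges from 0 to 12 (six most-common plus six least-common letters agreeing
# with English's ETAOIN / VKJXQZ ends); real English text scores high, so a
# brute-force decrypter can rank candidate keys by this score and keep the best.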
| 216
| 0
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=__lowercase )
class __lowerCamelCase ( __lowercase ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
__UpperCamelCase = field(default='question-answering-extractive' , metadata={'include_in_asdict_even_if_is_default': True} )
__UpperCamelCase = Features({'question': Value('string' ), 'context': Value('string' )} )
__UpperCamelCase = Features(
{
'answers': Sequence(
{
'text': Value('string' ),
'answer_start': Value('int32' ),
} )
} )
__UpperCamelCase = "question"
__UpperCamelCase = "context"
__UpperCamelCase = "answers"
@property
def A__ (self ):
'''simple docstring'''
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
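# --- Editor's note: this template maps dataset columns onto the canonical
# extractive-QA schema, so prepared examples take the form
#   {"question": str, "context": str,
#    "answers": {"text": list[str], "answer_start": list[int]}}
# (column names per the defaults above). The assumed entry point that applies
# the mapping is datasets' Dataset.prepare_for_task("question-answering-extractive").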
| 317
|
"""simple docstring"""
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the series above."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
if __name__ == "__main__":
print(F'{solution() = }')
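# --- Editor's note: each reduce() call multiplies one 13-digit window of N
# digit by digit, and max() scans all len(N) - 12 windows. For instance, a
# window "9989" (were the window length 4) would contribute 9 * 9 * 8 * 9 = 5832.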
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()


def _should_continue(line, indent):
    # Keep consuming lines while they stay inside the block: indented, (near-)empty,
    # or the closing parenthesis of a multi-line signature.
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Apply black formatting, wrapping indented snippets in a dummy class first."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the list of differences, or rewrite the file when `overwrite=True`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
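# For reference, a hypothetical snippet of the kind this checker validates: the
# comment names the source object plus an optional `with old->new` rename pattern.
#
#     # Copied from diffusers.models.attention.Attention.forward with Attention->CrossAttention
#     def forward(self, hidden_states):
#         ...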
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
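# Effect sketch (names as above): importing the package is cheap because only the
# import structure is registered; the heavy torch/TF modules load on first access.
#
#     import transformers.models.ctrl as ctrl  # fast, no torch import yet
#     model_cls = ctrl.CTRLModel               # triggers the real modeling import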
"""simple docstring"""
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiplication only for 2x2 matrices."""
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Given an even-length square matrix, return its four quadrants."""
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursive Strassen multiplication; expects square power-of-2 dimensions."""
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension2[1] and dimension2[0] == dimension1[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
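# Sanity-check sketch: Strassen must agree with the naive O(n^3) product on
# conforming shapes. `naive_matmul` is an illustrative helper, not from the original;
# e.g. naive_matmul(matrix1, matrix2) should equal strassen(matrix1, matrix2) below.
def naive_matmul(a: list, b: list) -> list:
    return [
        [sum(a[i][k] * b[k][j] for k in range(len(b))) for j in range(len(b[0]))]
        for i in range(len(a))
    ]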
if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self) -> None:
        self.head = None  # first node of the list
        self.tail = None  # last node; its `next` wraps back to the head

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    """Exercise insertions and deletions at the head, tail and middle."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
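# Structural invariant, sketched: the tail's `next` always wraps to the head, so a
# one-element list satisfies node.next is node and iteration stops after one lap.
#
#     lst = CircularLinkedList()
#     lst.insert_tail(1); lst.insert_tail(2)
#     repr(lst) == "1->2" and lst.tail.next is lst.head  # both True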
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import math
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of 1..n."""
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
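# Closed-form sketch using the standard identities sum(i) = n(n+1)/2 and
# sum(i^2) = n(n+1)(2n+1)/6, which avoids the O(n) loop entirely:
def solution_closed_form(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    square_of_sum = (n * (n + 1) // 2) ** 2
    return square_of_sum - sum_of_squares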
if __name__ == "__main__":
print(f'''{solution() = }''')
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
    from transformers import LlamaTokenizerFast
except ImportError as e:
    warnings.warn(e)
    warnings.warn(
        "The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"
    )
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)


def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
"""simple docstring"""
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
snake_case_ : int = os.path.join(_UpperCamelCase , '''tmp''' )
os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase )
snake_case_ : Dict = read_json(os.path.join(_UpperCamelCase , '''params.json''' ) )
snake_case_ : Tuple = NUM_SHARDS[model_size]
snake_case_ : Optional[Any] = params['''n_layers''']
snake_case_ : int = params['''n_heads''']
snake_case_ : Dict = n_heads // num_shards
snake_case_ : List[Any] = params['''dim''']
snake_case_ : str = dim // n_heads
snake_case_ : Any = 10_000.0
snake_case_ : Any = 1.0 / (base ** (torch.arange(0 , _UpperCamelCase , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
snake_case_ : Optional[Any] = params['''n_kv_heads'''] # for GQA / MQA
snake_case_ : Optional[Any] = n_heads_per_shard // num_key_value_heads
snake_case_ : List[Any] = dim // num_key_value_heads
else: # compatibility with other checkpoints
snake_case_ : str = n_heads
snake_case_ : Optional[int] = n_heads_per_shard
snake_case_ : str = dim
# permute for sliced rotary
def permute(_UpperCamelCase , _UpperCamelCase=n_heads , _UpperCamelCase=dim , _UpperCamelCase=dim ):
return w.view(_UpperCamelCase , dima // n_heads // 2 , 2 , _UpperCamelCase ).transpose(1 , 2 ).reshape(_UpperCamelCase , _UpperCamelCase )
print(f'''Fetching all parameters from the checkpoint at {input_base_path}.''' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
snake_case_ : Optional[Any] = torch.load(os.path.join(_UpperCamelCase , '''consolidated.00.pth''' ) , map_location='''cpu''' )
else:
# Sharded
snake_case_ : Union[str, Any] = [
torch.load(os.path.join(_UpperCamelCase , f'''consolidated.{i:02d}.pth''' ) , map_location='''cpu''' )
for i in range(_UpperCamelCase )
]
snake_case_ : Optional[Any] = 0
snake_case_ : str = {'''weight_map''': {}}
for layer_i in range(_UpperCamelCase ):
snake_case_ : Optional[int] = f'''pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
snake_case_ : str = {
f'''model.layers.{layer_i}.self_attn.q_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wq.weight'''] ),
f'''model.layers.{layer_i}.self_attn.k_proj.weight''': permute(
loaded[f'''layers.{layer_i}.attention.wk.weight'''] ),
f'''model.layers.{layer_i}.self_attn.v_proj.weight''': loaded[f'''layers.{layer_i}.attention.wv.weight'''],
f'''model.layers.{layer_i}.self_attn.o_proj.weight''': loaded[f'''layers.{layer_i}.attention.wo.weight'''],
f'''model.layers.{layer_i}.mlp.gate_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w1.weight'''],
f'''model.layers.{layer_i}.mlp.down_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w2.weight'''],
f'''model.layers.{layer_i}.mlp.up_proj.weight''': loaded[f'''layers.{layer_i}.feed_forward.w3.weight'''],
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[f'''layers.{layer_i}.attention_norm.weight'''],
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[f'''layers.{layer_i}.ffn_norm.weight'''],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
snake_case_ : Union[str, Any] = {
f'''model.layers.{layer_i}.input_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.attention_norm.weight'''
].clone(),
f'''model.layers.{layer_i}.post_attention_layernorm.weight''': loaded[0][
f'''layers.{layer_i}.ffn_norm.weight'''
].clone(),
}
snake_case_ : int = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wq.weight'''].view(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
for i in range(_UpperCamelCase )
] , dim=0 , ).reshape(_UpperCamelCase , _UpperCamelCase ) )
snake_case_ : Optional[int] = permute(
torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wk.weight'''].view(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
for i in range(_UpperCamelCase )
] , dim=0 , ).reshape(_UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
snake_case_ : int = torch.cat(
[
loaded[i][f'''layers.{layer_i}.attention.wv.weight'''].view(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
for i in range(_UpperCamelCase )
] , dim=0 , ).reshape(_UpperCamelCase , _UpperCamelCase )
snake_case_ : Optional[int] = torch.cat(
[loaded[i][f'''layers.{layer_i}.attention.wo.weight'''] for i in range(_UpperCamelCase )] , dim=1 )
snake_case_ : Dict = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w1.weight'''] for i in range(_UpperCamelCase )] , dim=0 )
snake_case_ : Union[str, Any] = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w2.weight'''] for i in range(_UpperCamelCase )] , dim=1 )
snake_case_ : Optional[int] = torch.cat(
[loaded[i][f'''layers.{layer_i}.feed_forward.w3.weight'''] for i in range(_UpperCamelCase )] , dim=0 )
snake_case_ : str = inv_freq
for k, v in state_dict.items():
snake_case_ : Dict = filename
param_count += v.numel()
torch.save(_UpperCamelCase , os.path.join(_UpperCamelCase , _UpperCamelCase ) )
snake_case_ : Any = f'''pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'''
if model_size == "7B":
# Unsharded
snake_case_ : List[str] = {
'''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
'''model.norm.weight''': loaded['''norm.weight'''],
'''lm_head.weight''': loaded['''output.weight'''],
}
else:
snake_case_ : Dict = {
'''model.norm.weight''': loaded[0]['''norm.weight'''],
'''model.embed_tokens.weight''': torch.cat(
[loaded[i]['''tok_embeddings.weight'''] for i in range(_UpperCamelCase )] , dim=1 ),
'''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(_UpperCamelCase )] , dim=0 ),
}
for k, v in state_dict.items():
snake_case_ : List[str] = filename
param_count += v.numel()
torch.save(_UpperCamelCase , os.path.join(_UpperCamelCase , _UpperCamelCase ) )
# Write configs
snake_case_ : int = {'''total_size''': param_count * 2}
write_json(_UpperCamelCase , os.path.join(_UpperCamelCase , '''pytorch_model.bin.index.json''' ) )
snake_case_ : str = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
snake_case_ : Optional[int] = params['''multiple_of'''] if '''multiple_of''' in params else 256
snake_case_ : Optional[Any] = LlamaConfig(
hidden_size=_UpperCamelCase , intermediate_size=compute_intermediate_size(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , num_attention_heads=params['''n_heads'''] , num_hidden_layers=params['''n_layers'''] , rms_norm_eps=params['''norm_eps'''] , num_key_value_heads=_UpperCamelCase , )
config.save_pretrained(_UpperCamelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('''Loading the checkpoint in a Llama model.''' )
snake_case_ : Union[str, Any] = LlamaForCausalLM.from_pretrained(_UpperCamelCase , torch_dtype=torch.floataa , low_cpu_mem_usage=_UpperCamelCase )
# Avoid saving this as part of the config.
del model.config._name_or_path
print('''Saving in the Transformers format.''' )
model.save_pretrained(_UpperCamelCase , safe_serialization=_UpperCamelCase )
shutil.rmtree(_UpperCamelCase )
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
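# Hypothetical invocation (paths are placeholders, not from the original script):
#
#     python convert_llama_weights_to_hf.py \
#         --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /path/to/hf/model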
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n, prec=1000):
    """
    Probabilistic Miller-Rabin-style primality test using bin_exp_mod for the
    modular exponentiation; `prec` is the number of random witness rounds.
    """
    if n < 2:
        return False

    if n % 2 == 0:
        return n == 2

    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d //= 2  # integer division keeps d an int; n - 1 = d * (2 ** exp)
        exp += 1

    count = 0
    while count < prec:
        a = random.randint(2, n - 1)
        b = bin_exp_mod(a, d, n)
        if b != 1:
            flag = True
            for _ in range(exp):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True


if __name__ == "__main__":
    n = abs(int(input("Enter bound : ").strip()))
    print("Here's the list of primes:")
    print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
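# Reliability note, sketched: each random round catches a composite with probability
# at least 3/4, so `prec` rounds leave at most a 4**(-prec) chance of a false prime.
# Unlike a plain Fermat test, this also rejects Carmichael numbers such as 561.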
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class snake_case__ ( UpperCamelCase , unittest.TestCase):
a_ = BarthezTokenizer
a_ = BarthezTokenizerFast
a_ = True
a_ = True
def A ( self : Optional[Any] ) -> List[str]:
super().setUp()
UpperCAmelCase_ : Tuple = BarthezTokenizerFast.from_pretrained('''moussaKam/mbarthez''' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=_A )
UpperCAmelCase_ : str = tokenizer
def A ( self : Union[str, Any] ) -> Any:
UpperCAmelCase_ : int = '''<pad>'''
UpperCAmelCase_ : int = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def A ( self : int ) -> Dict:
UpperCAmelCase_ : Tuple = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(vocab_keys[-1] , '''<mask>''' )
self.assertEqual(len(_A ) , 10_11_22 )
def A ( self : int ) -> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def A ( self : Tuple ) -> Any:
UpperCAmelCase_ : Union[str, Any] = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
UpperCAmelCase_ : int = [0, 57, 30_18, 7_03_07, 91, 2]
UpperCAmelCase_ : Optional[int] = self.tokenizer(
_A , max_length=len(_A ) , padding=_A , truncation=_A , return_tensors='''pt''' )
self.assertIsInstance(_A , _A )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
UpperCAmelCase_ : Optional[int] = batch.input_ids.tolist()[0]
self.assertListEqual(_A , _A )
def A ( self : Any ) -> Tuple:
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ : List[Any] = self.get_tokenizer()
UpperCAmelCase_ : Optional[Any] = self.get_rust_tokenizer()
UpperCAmelCase_ : Dict = '''I was born in 92000, and this is falsé.'''
UpperCAmelCase_ : Union[str, Any] = tokenizer.tokenize(_A )
UpperCAmelCase_ : Optional[Any] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
UpperCAmelCase_ : Tuple = tokenizer.encode(_A , add_special_tokens=_A )
UpperCAmelCase_ : Union[str, Any] = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
UpperCAmelCase_ : Tuple = self.get_rust_tokenizer()
UpperCAmelCase_ : List[Any] = tokenizer.encode(_A )
UpperCAmelCase_ : Optional[int] = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
@slow
def A ( self : Tuple ) -> Tuple:
UpperCAmelCase_ : Optional[Any] = {'''input_ids''': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
UpperCAmelCase_ : Dict = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''moussaKam/mbarthez''' , revision='''c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6''' , sequences=_A , )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 bytes to printable unicode strings, avoiding the
    whitespace/control characters that BPE merges would otherwise choke on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BlenderbotTokenizer(PreTrainedTokenizer):
    """
    Constructs a Blenderbot tokenizer, derived from the GPT-2 byte-level BPE tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation) -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    """Convert between energy units, using the joule as the common base unit."""
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
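# Usage sketch, reading values straight off the table above:
#
#     energy_conversion("kilowatthour", "joule", 1)   # -> 3600000.0
#     energy_conversion("joule", "electronvolt", 1)   # -> ~6.241509074e18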
def get_highest_set_bit_position(number: int) -> int:
    """Return the 1-based position of the highest set bit (0 for input 0)."""
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    position = 0
    while number:
        position += 1
        number >>= 1
    return position
if __name__ == "__main__":
import doctest
doctest.testmod()
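# Equivalence sketch: for non-negative ints this matches int.bit_length(), e.g.
# get_highest_set_bit_position(25) == (25).bit_length() == 5, since 25 = 0b11001.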
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
        # some kwargs must not be passed if they don't have a default value
        # some others are deprecated and we can also not pass them if they are the default value
        for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 2.0 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 2):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        # Remove 1.3 new arguments
        if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
            for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
                del pd_read_csv_kwargs[pd_read_csv_parameter]

        return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
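# Illustration of the chunked-read pattern used in `_generate_tables` above,
# as a standalone sketch (the file name "data.csv" is hypothetical): pandas
# yields one DataFrame per `chunksize` rows, and each chunk is converted into
# its own Arrow table, so large CSV files never need to fit in memory at once.
#
#   import pandas as pd
#   import pyarrow as pa
#
#   reader = pd.read_csv("data.csv", iterator=True, chunksize=10_000)
#   tables = [pa.Table.from_pandas(df) for df in reader]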
| 317
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if len(scores) == 0:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print('Optimal value : ', end='')
    print(minimax(0, 0, True, scores, height))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
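# Worked example of the recursion (hypothetical 4-leaf tree): with scores
# [3, 5, 2, 9] and height log2(4) = 2, the maximizer sees
# max(min(3, 5), min(2, 9)) = max(3, 2) = 3.
assert minimax(0, 0, True, [3, 5, 2, 9], 2) == 3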
| 280
|
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize('keep_in_memory', [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    'features',
    [
        None,
        {'text': 'string'},
        {'text': 'int32'},
        {'text': 'float32'},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'text': 'string'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize('split', [None, NamedSplit('train'), 'train', 'test'])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize('path_type', [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize('keep_in_memory', [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({'train': text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    'features',
    [
        None,
        {'text': 'string'},
        {'text': 'int32'},
        {'text': 'float32'},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / 'cache'
    default_expected_features = {'text': 'string'}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({'train': text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize('split', [None, NamedSplit('train'), 'train', 'test'])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = 'train'
        path = {'train': text_path, 'test': text_path}
    cache_dir = tmp_path / 'cache'
    expected_features = {'text': 'string'}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
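# Minimal usage sketch mirroring the tests above outside pytest (the file name
# "lines.txt" is hypothetical; each line of the file becomes one example in a
# single "text" column):
#
#   from datasets.io.text import TextDatasetReader
#
#   ds = TextDatasetReader("lines.txt", cache_dir="cache").read()
#   print(ds.column_names)  # ['text']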
| 280
| 1
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
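# Minimal usage sketch for the config above (configuration only, no model
# weights involved; relies on the default-ResNet fallback shown in __init__):
#
#   config = UperNetConfig()   # backbone_config defaults to a ResNet over stages 1-4
#   d = config.to_dict()       # the nested backbone config is serialized too
#   assert d["model_type"] == "upernet"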
| 304
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
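# Sketch of the conversation-truncation rule implemented above, on plain ids
# (toy numbers, no tokenizer required): only the most recent `model_max_length`
# ids are kept when the history grows too long.
#
#   model_max_length = 8
#   input_ids = list(range(12))
#   if len(input_ids) > model_max_length:
#       input_ids = input_ids[-model_max_length:]   # -> [4, 5, 6, ..., 11]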
| 85
| 0
|
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel

logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
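# Quick sanity check of `entropy` above (pure computation, hypothetical inputs):
# a uniform two-way distribution has entropy ln 2 ≈ 0.6931, a one-hot one has 0;
# the `plogp[p == 0] = 0` line is what makes the zero-probability term well defined.
assert torch.allclose(entropy(torch.tensor([[0.5, 0.5], [1.0, 0.0]])), torch.tensor([0.6931, 0.0]), atol=1e-3)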
def print_2d_tensor(tensor):
    """Log a 2D tensor, one layer per line."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores according to
    http://arxiv.org/abs/1905.10650 (gradients of the loss w.r.t. the head mask)."""
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    total_loss = 0.0
    tot_tokens = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Mask heads iteratively, from least to most important, until the score
    drops below `masking_threshold` times the original score (Michel et al.,
    http://arxiv.org/abs/1905.10650)."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually remove the heads selected by `head_mask` (pruning is like
    masking, but the masked weights are deleted) and compare loss and timing."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name", default="", type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir", default=None, type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float,
        help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount to heads to masking at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length", default=128, type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
| 363
|
"""simple docstring"""
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes at the head of the list
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping the data of two nodes, found by value
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next

        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next

        if node_1 is None or node_2 is None:
            return

        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('After swapping')
ll.print_list()
| 69
| 0
|
def solution() -> str:
    """Returns the last ten digits of the series 1**1 + 2**2 + ... + 1000**1000."""
    total = 0
    for i in range(1, 1_001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
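# Sanity check of the approach on a smaller range (same slicing rule):
# 1**1 + 2**2 + 3**3 = 32, whose last ten digits are just "32".
assert str(sum(i**i for i in range(1, 4)))[-10:] == "32"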
| 279
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
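# The divisibility assertions above follow from GLPN's resizing rule, which
# (as an assumption here) floors each spatial dimension to a multiple of
# `size_divisor` before processing:
assert 481 // 32 * 32 == 480 and 639 // 32 * 32 == 608  # both multiples of 32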
| 279
| 1
|
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
logger = logging.get_logger(__name__)
enable_full_determinism()
class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'block_out_channels': (32, 64),
            'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
            'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
            'attention_head_dim': 3,
            'out_channels': 3,
            'in_channels': 3,
            'layers_per_block': 2,
            'sample_size': 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'sample_size': 32,
            'in_channels': 4,
            'out_channels': 4,
            'layers_per_block': 2,
            'block_out_channels': (32, 64),
            'attention_head_dim': 32,
            'down_block_types': ('DownBlock2D', 'DownBlock2D'),
            'up_block_types': ('UpBlock2D', 'UpBlock2D'),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained('fusing/unet-ldm-dummy-update', output_loading_info=True)

        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['missing_keys']), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != 'cuda', 'This test is supposed to run on GPU')
    def test_from_pretrained_accelerate(self):
        model, _ = UNet2DModel.from_pretrained('fusing/unet-ldm-dummy-update', output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != 'cuda', 'This test is supposed to run on GPU')
    def test_from_pretrained_accelerate_wont_change_results(self):
        model_accelerate, _ = UNet2DModel.from_pretrained('fusing/unet-ldm-dummy-update', output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)['sample']

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNet2DModel.from_pretrained(
            'fusing/unet-ldm-dummy-update', output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)['sample']

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)

    def test_output_pretrained(self):
        model = UNet2DModel.from_pretrained('fusing/unet-ldm-dummy-update')
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int64, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'block_out_channels': [32, 64, 64, 64],
            'in_channels': 3,
            'layers_per_block': 1,
            'out_channels': 3,
            'time_embedding_type': 'fourier',
            'norm_eps': 1e-6,
            'mid_block_scale_factor': math.sqrt(2.0),
            'norm_num_groups': None,
            'down_block_types': [
                'SkipDownBlock2D',
                'AttnSkipDownBlock2D',
                'SkipDownBlock2D',
                'SkipDownBlock2D',
            ],
            'up_block_types': [
                'SkipUpBlock2D',
                'SkipUpBlock2D',
                'AttnSkipUpBlock2D',
                'SkipUpBlock2D',
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNet2DModel.from_pretrained('google/ncsnpp-celebahq-256', output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['missing_keys']), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs['sample'] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNet2DModel.from_pretrained('google/ncsnpp-celebahq-256')
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNet2DModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update')
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
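# Minimal usage sketch for the dummy configs above (random weights, no
# checkpoint download; block types and argument names follow the init dicts
# used in these tests):
#
#   model = UNet2DModel(sample_size=32, in_channels=3, out_channels=3,
#                       layers_per_block=2, block_out_channels=(32, 64))
#   noise = torch.randn(1, 3, 32, 32)
#   sample = model(noise, timestep=10).sample   # same spatial shape as the input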
| 249
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
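# The `_LazyModule` registration above defers every heavy submodule import until
# an attribute is first accessed. A minimal sketch of the same idea with
# module-level `__getattr__` (PEP 562) -- not the actual `_LazyModule` code:
#
#   import importlib
#
#   _lazy = {"ConvNextModel": "modeling_convnext"}
#
#   def __getattr__(name):
#       if name in _lazy:
#           module = importlib.import_module("." + _lazy[name], __name__)
#           return getattr(module, name)
#       raise AttributeError(name)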
| 249
| 1
|
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Returns a mapping from utf-8 bytes to printable unicode strings, avoiding
    whitespace/control characters that BPE merges would otherwise choke on."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word, where the word is a
    tuple of variable-length string symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
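# Worked example of `get_pairs`: the set of adjacent symbol pairs is what drives
# which BPE merge is applied next (lowest merge rank first).
assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}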
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation) -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 348
|
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}


class EsmConfig(PretrainedConfig):
    model_type = "esm"
    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0

    embed_aa: bool = True
    bypass_lm: bool = False

    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)

        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
    return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
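# Added usage sketch (illustrative, not part of the original module): nested dicts
# passed for `esmfold_config` are promoted to the dataclasses above by their
# __post_init__ hooks, so config round-tripping through to_dict() stays lossless.
#
#   config = EsmConfig(vocab_size=33, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 4}})
#   assert isinstance(config.esmfold_config.trunk, TrunkConfig)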
| 348
| 1
|
"""simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """
    Return the prime numbers up to and including num.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    >>> prime_sieve_eratosthenes(20)
    [2, 3, 5, 7, 11, 13, 17, 19]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
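# Added note (illustrative, not part of the original script): the outer loop only
# needs to run while p * p <= num because any composite number <= num must have a
# prime factor no larger than sqrt(num); marking starts at p * p since smaller
# multiples of p were already marked by smaller primes.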
| 371
|
def generate_large_matrix() -> list[list[int]]:
    return [list(range(1_000 - i, -1_000 - i, -1)) for i in range(1_000)]


grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    # Validate that the rows and columns of the grid are sorted in decreasing order.
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """
    Find the index of the first negative number in a decreasingly sorted array.

    >>> find_negative_index([0, 0, 0, -1, -1])
    3
    >>> find_negative_index([])
    0
    """
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid

        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=500)
        print(f"{func}() took {time:0.4f} seconds")
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
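    # Added sanity check (illustrative, not in the original script): all three
    # counting strategies must agree on the first hand-written test grid, which
    # contains eight negative numbers.
    example = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
    assert (
        count_negatives_binary_search(example)
        == count_negatives_brute_force(example)
        == count_negatives_brute_force_with_break(example)
        == 8
    )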
| 232
| 0
|
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('backbone.patch_embed.proj.weight', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight') )
rename_keys.append(('backbone.patch_embed.proj.bias', 'model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias') )
rename_keys.append(('backbone.patch_embed.norm.weight', 'model.pixel_level_module.encoder.model.embeddings.norm.weight') )
rename_keys.append(('backbone.patch_embed.norm.bias', 'model.pixel_level_module.encoder.model.embeddings.norm.bias') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.attn.proj.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.norm2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.layers.{i}.downsample.reduction.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.layers.{i}.downsample.norm.weight', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.layers.{i}.downsample.norm.bias', f'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(('sem_seg_head.layer_4.weight', 'model.pixel_level_module.decoder.fpn.stem.0.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.weight', 'model.pixel_level_module.decoder.fpn.stem.1.weight') )
rename_keys.append(('sem_seg_head.layer_4.norm.bias', 'model.pixel_level_module.decoder.fpn.stem.1.bias') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'sem_seg_head.adapter_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((f'sem_seg_head.adapter_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((f'sem_seg_head.layer_{source_index}.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.weight', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((f'sem_seg_head.layer_{source_index}.norm.bias', f'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(('sem_seg_head.mask_features.weight', 'model.pixel_level_module.decoder.mask_projection.weight') )
rename_keys.append(('sem_seg_head.mask_features.bias', 'model.pixel_level_module.decoder.mask_projection.bias') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', f'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', f'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', f'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', f'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', f'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', f'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((f'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', f'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.weight', 'model.transformer_module.decoder.layernorm.weight') )
rename_keys.append(('sem_seg_head.predictor.transformer.decoder.norm.bias', 'model.transformer_module.decoder.layernorm.bias') )
# heads on top
rename_keys.append(('sem_seg_head.predictor.query_embed.weight', 'model.transformer_module.queries_embedder.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.weight', 'model.transformer_module.input_projection.weight') )
rename_keys.append(('sem_seg_head.predictor.input_proj.bias', 'model.transformer_module.input_projection.bias') )
rename_keys.append(('sem_seg_head.predictor.class_embed.weight', 'class_predictor.weight') )
rename_keys.append(('sem_seg_head.predictor.class_embed.bias', 'class_predictor.bias') )
for i in range(3 ):
rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.weight', f'mask_embedder.{i}.0.weight') )
rename_keys.append((f'sem_seg_head.predictor.mask_embed.layers.{i}.bias', f'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(
    model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False
):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
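# Example invocation (illustrative; the checkpoint path is a placeholder, not from
# the original script):
#
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path /path/to/dump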
| 161
|
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@inproceedings{bleurt,
title={BLEURT: Learning Robust Metrics for Text Generation},
author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
booktitle={ACL},
year={2020},
url={https://arxiv.org/abs/2004.04696}
}
'''
_DESCRIPTION = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)
and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune
it for your specific application (the latter is expected to perform better).
See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''
_KWARGS_DESCRIPTION = '''
BLEURT score.
Args:
`predictions` (list of str): prediction/candidate sentences
`references` (list of str): reference sentences
`checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.
Returns:
\'scores\': List of scores.
Examples:
>>> predictions = ["hello there", "general kenobi"]
>>> references = ["hello there", "general kenobi"]
>>> bleurt = datasets.load_metric("bleurt")
>>> results = bleurt.compute(predictions=predictions, references=references)
>>> print([round(v, 2) for v in results["scores"]])
[1.03, 1.04]
'''
CHECKPOINT_URLS = {
'''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
'''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
'''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
'''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
'''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
'''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
'''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
'''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
'''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
'''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class BLEURT(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/google-research/bleurt",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/google-research/bleurt"],
            reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"],
        )

    def _download_and_prepare(self, dl_manager):
        # check that the config name specifies a valid BLEURT checkpoint
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')."
            )
            checkpoint_name = "bleurt-base-128"
        elif self.config_name.lower() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            checkpoint_name = self.config_name.upper()
        else:
            raise KeyError(
                f"{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}"
            )

        # download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))

    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
        return {"scores": scores}
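# Added usage sketch (illustrative, not part of the metric script): a non-default
# checkpoint is selected through the config name, which must match a key in
# CHECKPOINT_URLS after lower-/upper-casing.
#
#   bleurt = datasets.load_metric("bleurt", "bleurt-large-512")
#   results = bleurt.compute(predictions=["hello there"], references=["hello there"])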
| 279
| 0
|
"""simple docstring"""
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """
    Returns True if it is safe to place a queen at (row, column) given the
    current state of the board.
    """
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """
    Builds the state space tree: places a queen in every safe column of the
    current row, recurses into the next row, and backtracks on return.
    """
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Prints a board that holds a successful combination."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("""The total no. of solutions are :""", len(solution))
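# Added note (illustrative, not in the original script): every successful
# placement is appended to `solution`, so for the classic n = 8 case the search
# prints all boards and len(solution) ends up at 92, the known number of
# eight-queens solutions.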
| 161
|
"""simple docstring"""
import os
def solution() -> int:
    """Find the greatest product of four adjacent numbers (right, down, or on
    either diagonal) in the 20x20 grid stored in grid.txt."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
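# Added note (illustrative): each scan range stops at 17 because a window of four
# cells starting at index j needs j + 3 <= 19 in a 20-wide grid, i.e. j <= 16;
# the second diagonal starts at j = 3 for the symmetric reason (j - 3 >= 0).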
| 161
| 1
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=8,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9
    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
| 202
|
"""simple docstring"""
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
'''DecisionTransformerConfig''',
'''EncoderDecoderConfig''',
'''MusicgenConfig''',
'''RagConfig''',
'''SpeechEncoderDecoderConfig''',
'''TimmBackboneConfig''',
'''VisionEncoderDecoderConfig''',
'''VisionTextDualEncoderConfig''',
'''LlamaConfig''',
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith("/"):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint


def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
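# Added example (illustrative, not part of the utility): the checkpoint regex
# captures (name, link) pairs from markdown-style links in a config docstring.
#
#   _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
#   # -> [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")]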
| 69
| 0
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'"
                )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)
                    ):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
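# Added usage sketch (illustrative; the file path is a placeholder): this builder
# is what backs `load_dataset("parquet", ...)`, streaming record batches of
# `batch_size` rows so memory stays bounded even for large files.
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "data/train.parquet"})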
| 354
|
from __future__ import annotations
END = "#"


class Trie:
    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)
trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
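# Added note (illustrative): find_word("de") walks the trie down to the "de" node
# and then enumerates every stored suffix, so autocomplete_using_trie("de")
# yields entries such as "depart ", "detergent ", "deer ", and "deal "
# (_elements encodes the END marker as a trailing space).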
| 305
| 0
|
"""simple docstring"""
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4

        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
    def get_fake_oneformer_outputs(self):
        return OneFormerForUniversalSegmentationOutput(
            # +1 for null class
            class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1)),
            masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width)),
        )
@require_torch
@require_vision
class OneFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
    # only for test_image_processing_common.test_image_proc_to_json_string
    image_processing_class = image_processing_class

    def setUp(self):
        self.image_processing_tester = OneFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processing_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "ignore_index"))
        self.assertTrue(hasattr(image_processor, "class_info_file"))
        self.assertTrue(hasattr(image_processor, "num_text"))
        self.assertTrue(hasattr(image_processor, "repo_path"))
        self.assertTrue(hasattr(image_processor, "metadata"))
        self.assertTrue(hasattr(image_processor, "do_reduce_labels"))

    def test_batch_feature(self):
        pass
def _lowerCamelCase ( self ) -> Optional[Any]:
# Initialize image_processor
__lowercase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowercase : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image )
# Test not batched input
__lowercase : Any = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
__lowercase ,__lowercase : List[str] = self.image_processing_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowercase ,__lowercase : str = self.image_processing_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
__lowercase : Optional[int] = image_processor(
UpperCamelCase_ , ['''semantic'''] * len(UpperCamelCase_ ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self ) -> str:
# Initialize image_processor
__lowercase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowercase : Optional[int] = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray )
# Test not batched input
__lowercase : List[str] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
__lowercase ,__lowercase : Any = self.image_processing_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowercase ,__lowercase : Dict = self.image_processing_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
__lowercase : Any = image_processor(
UpperCamelCase_ , ['''semantic'''] * len(UpperCamelCase_ ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowerCamelCase ( self ) -> int:
# Initialize image_processor
__lowercase : str = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowercase : str = prepare_image_inputs(self.image_processing_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor )
# Test not batched input
__lowercase : Optional[int] = image_processor(image_inputs[0] , ['''semantic'''] , return_tensors='''pt''' ).pixel_values
__lowercase ,__lowercase : Tuple = self.image_processing_tester.get_expected_values(UpperCamelCase_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowercase ,__lowercase : Union[str, Any] = self.image_processing_tester.get_expected_values(UpperCamelCase_ , batched=UpperCamelCase_ )
__lowercase : Optional[int] = image_processor(
UpperCamelCase_ , ['''semantic'''] * len(UpperCamelCase_ ) , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
    def comm_get_image_processor_inputs(self, with_segmentation_maps=False, is_instance_map=False, segmentation_type="np"):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # prepare image and target
        num_labels = self.image_processing_tester.num_labels
        annotations = None
        instance_id_to_semantic_id = None
        image_inputs = prepare_image_inputs(self.image_processing_tester, equal_resolution=False)
        if with_segmentation_maps:
            high = num_labels
            if is_instance_map:
                labels_expanded = list(range(num_labels)) * 2
                instance_id_to_semantic_id = dict(enumerate(labels_expanded))
            annotations = [
                np.random.randint(0, high * 2, (img.size[1], img.size[0])).astype(np.uint8) for img in image_inputs
            ]
            if segmentation_type == "pil":
                annotations = [Image.fromarray(annotation) for annotation in annotations]
        inputs = image_processor(
            image_inputs,
            ["semantic"] * len(image_inputs),
            annotations,
            return_tensors="pt",
            instance_id_to_semantic_id=instance_id_to_semantic_id,
            pad_and_return_pixel_mask=True,
        )
        return inputs
def _lowerCamelCase ( self ) -> Any:
pass
def _lowerCamelCase ( self ) -> int:
def common(UpperCamelCase_=False , UpperCamelCase_=None ):
__lowercase : Tuple = self.comm_get_image_processor_inputs(
with_segmentation_maps=UpperCamelCase_ , is_instance_map=UpperCamelCase_ , segmentation_type=UpperCamelCase_ )
__lowercase : List[str] = inputs['''mask_labels''']
__lowercase : Dict = inputs['''class_labels''']
__lowercase : Optional[Any] = inputs['''pixel_values''']
__lowercase : Union[str, Any] = inputs['''text_inputs''']
# check the batch_size
for mask_label, class_label, text_input in zip(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(UpperCamelCase_ ) , self.image_processing_tester.num_text )
common()
        common(is_instance_map=True)
        common(is_instance_map=False, segmentation_type='''pil''')
        common(is_instance_map=True, segmentation_type='''pil''')
def _lowerCamelCase ( self ) -> int:
__lowercase : Any = np.zeros((20, 50) )
__lowercase : Tuple = 1
__lowercase : Any = 1
__lowercase : Union[str, Any] = 1
__lowercase : List[Any] = binary_mask_to_rle(UpperCamelCase_ )
self.assertEqual(len(UpperCamelCase_ ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
def _lowerCamelCase ( self ) -> List[Any]:
__lowercase : Dict = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file='''ade20k_panoptic.json''',
            num_text=self.image_processing_tester.num_text,
            repo_path='''shi-labs/oneformer_demo''',
        )
__lowercase : Any = self.image_processing_tester.get_fake_oneformer_outputs()
        __lowercase : Union[str, Any] = image_processor.post_process_semantic_segmentation(UpperCamelCase_ )
self.assertEqual(len(UpperCamelCase_ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
__lowercase : Tuple = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
        __lowercase : int = image_processor.post_process_semantic_segmentation(UpperCamelCase_ , target_sizes=UpperCamelCase_ )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def _lowerCamelCase ( self ) -> Union[str, Any]:
__lowercase : Optional[Any] = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file='''ade20k_panoptic.json''',
            num_text=self.image_processing_tester.num_text,
            repo_path='''shi-labs/oneformer_demo''',
        )
__lowercase : Optional[Any] = self.image_processing_tester.get_fake_oneformer_outputs()
__lowercase : List[str] = image_processor.post_process_instance_segmentation(UpperCamelCase_ , threshold=0 )
self.assertTrue(len(UpperCamelCase_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , UpperCamelCase_ )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : Any = self.image_processing_class(
            num_labels=self.image_processing_tester.num_classes,
            max_seq_length=77,
            task_seq_length=77,
            class_info_file='''ade20k_panoptic.json''',
            num_text=self.image_processing_tester.num_text,
            repo_path='''shi-labs/oneformer_demo''',
        )
__lowercase : Optional[int] = self.image_processing_tester.get_fake_oneformer_outputs()
__lowercase : List[Any] = image_processor.post_process_panoptic_segmentation(UpperCamelCase_ , threshold=0 )
self.assertTrue(len(UpperCamelCase_ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue('''segmentation''' in el )
self.assertTrue('''segments_info''' in el )
self.assertEqual(type(el['''segments_info'''] ) , UpperCamelCase_ )
self.assertEqual(
el['''segmentation'''].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
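# A self-contained sketch of the run-length encoding that the RLE test above
# relies on. This is an assumption about how binary_mask_to_rle behaves (run
# lengths counted over the flattened mask, starting with the run of zeros),
# not the library's exact implementation.
import numpy as np

def binary_mask_to_rle_sketch(mask):
    pixels = mask.flatten()
    runs = []
    current, count = 0, 0
    for p in pixels:
        if p == current:
            count += 1
        else:
            runs.append(count)  # close the previous run
            current, count = int(p), 1
    runs.append(count)
    return runs

# e.g. a 20x50 zero mask with a single stretch of ones flattens to
# [leading_zeros, ones, trailing_zeros].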
| 249
|
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
def search(grid, init, goal, cost, heuristic):
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [row, col]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
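    # Sanity sketch (names as restored above): the returned path is a list of
    # [row, col] cells, so it must begin at `init` and end at `goal`.
    assert path[0] == init and path[-1] == goal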
| 249
| 1
|
"""simple docstring"""
import re
def dna(dna: str) -> str:
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")
    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
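    # Usage sketch: each base is swapped for its Watson-Crick complement.
    print(dna("GTCA"))  # -> CAGT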
| 212
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
_A = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
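# Minimal usage sketch (class name as restored above): constructing the legacy
# class still works, but emits the FutureWarning configured in __init__.
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        CLIPFeatureExtractor()
        assert any(issubclass(w.category, FutureWarning) for w in caught)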
| 212
| 1
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
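# Usage sketch (requires network access; the checkpoint name below is an
# assumption — substitute whichever Chinese-CLIP checkpoint you use):
# processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
# batch = processor(text=["一只猫"], images=pil_image, return_tensors="pt")
# `batch` then carries the tokenizer outputs plus `pixel_values`, matching the
# merge performed in __call__ above.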
| 30
|
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('T')
class LRUCache(Generic[T]):
    '''simple docstring'''
    dq_store: deque  # Cache store of keys
    key_reference: set  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n
    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()  # evict the least recently used key
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)
    def display(self) -> None:
        for k in self.dq_store:
            print(k)
    def __repr__(self) -> str:
        return f'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}'
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache: LRUCache[str | int] = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
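# Design-note sketch (not part of the original module): deque.remove() above
# makes refer() O(n) per hit. A common alternative is collections.OrderedDict,
# whose move_to_end()/popitem() give O(1) hits and evictions.
from collections import OrderedDict

class LRUCacheOD:
    """Hypothetical O(1) variant, shown for comparison only."""
    def __init__(self, capacity: int) -> None:
        self.capacity = capacity
        self.store: OrderedDict = OrderedDict()
    def refer(self, x) -> None:
        if x in self.store:
            self.store.move_to_end(x)  # mark as most recently used
        elif len(self.store) == self.capacity:
            self.store.popitem(last=False)  # evict the least recently used key
        self.store[x] = None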
| 232
| 0
|
'''simple docstring'''
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
_CITATION = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , reference_urls=[] , )
    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        '''simple docstring'''
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, '', x) for x in predictions])
                references = np.array([re.sub(s, '', x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)
        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)
        if ignore_punctuation:
            repl_table = string.punctuation.maketrans('', '', string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        if ignore_numbers:
            repl_table = string.digits.maketrans('', '', string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)
        score_list = predictions == references
        return {"exact_match": np.mean(score_list) * 100}
| 190
|
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = 0
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__A : List[str] = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast))
self.assertGreater(len(_UpperCAmelCase) , 0)
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__A : Any = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , (GPTaTokenizer, GPTaTokenizerFast))
self.assertGreater(len(_UpperCAmelCase) , 0)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 12)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , (RobertaTokenizer, RobertaTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 20)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = AutoConfig.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
# Check that tokenizer_type ≠ model_type
__A : Optional[Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase , config=_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast))
self.assertEqual(tokenizer.vocab_size , 12)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_UpperCAmelCase , 'vocab.txt'))
__A : Union[str, Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase , tokenizer_type='bert' , use_fast=_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_UpperCAmelCase , 'vocab.json'))
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_UpperCAmelCase , 'merges.txt'))
__A : str = AutoTokenizer.from_pretrained(_UpperCAmelCase , tokenizer_type='gpt2' , use_fast=_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(_UpperCAmelCase , 'vocab.txt'))
__A : List[str] = AutoTokenizer.from_pretrained(_UpperCAmelCase , tokenizer_type='bert')
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(_UpperCAmelCase , 'vocab.json'))
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(_UpperCAmelCase , 'merges.txt'))
__A : List[str] = AutoTokenizer.from_pretrained(_UpperCAmelCase , tokenizer_type='gpt2')
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with pytest.raises(_UpperCAmelCase):
AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx')
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__A : List[Any] = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased')
self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast))
if isinstance(_UpperCAmelCase , _UpperCAmelCase):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , _UpperCAmelCase)
else:
self.assertEqual(tokenizer.do_lower_case , _UpperCAmelCase)
self.assertEqual(tokenizer.model_max_length , 512)
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
_UpperCAmelCase , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ):
__A : str = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = TOKENIZER_MAPPING.values()
__A : Union[str, Any] = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__)
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__)
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(_UpperCAmelCase)
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=_UpperCAmelCase) , _UpperCAmelCase)
self.assertIsInstance(AutoTokenizer.from_pretrained('bert-base-cased') , _UpperCAmelCase)
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = AutoTokenizer.from_pretrained('distilbert-base-uncased' , do_lower_case=_UpperCAmelCase)
__A : str = 'Hello, world. How are you?'
__A : List[str] = tokenizer.tokenize(_UpperCAmelCase)
self.assertEqual('[UNK]' , tokens[0])
__A : Dict = AutoTokenizer.from_pretrained('microsoft/mpnet-base' , do_lower_case=_UpperCAmelCase)
__A : List[Any] = tokenizer.tokenize(_UpperCAmelCase)
self.assertEqual('[UNK]' , tokens[0])
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = AutoTokenizer.from_pretrained('robot-test/dummy-tokenizer-fast-with-model-config')
self.assertEqual(type(_UpperCAmelCase) , _UpperCAmelCase)
self.assertEqual(tokenizer.model_max_length , 512)
self.assertEqual(tokenizer.vocab_size , 3_0000)
self.assertEqual(tokenizer.unk_token , '[UNK]')
self.assertEqual(tokenizer.padding_side , 'right')
self.assertEqual(tokenizer.truncation_side , 'right')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , (BertTokenizer, BertTokenizerFast))
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Any = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , tokenizer.__class__)
self.assertEqual(tokenizera.vocab_size , 12)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = AutoTokenizer.from_pretrained('ctrl')
# There is no fast CTRL so this always gives us a slow tokenizer.
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = get_tokenizer_config('bert-base-cased')
__A : Optional[int] = config.pop('_commit_hash' , _UpperCAmelCase)
# If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
self.assertEqual(_UpperCAmelCase , {'do_lower_case': False})
# This model does not have a tokenizer_config so we get back an empty dict.
__A : Dict = get_tokenizer_config(_UpperCAmelCase)
self.assertDictEqual(_UpperCAmelCase , {})
# A tokenizer saved with `save_pretrained` always creates a tokenizer config.
__A : Any = AutoTokenizer.from_pretrained(_UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Any = get_tokenizer_config(_UpperCAmelCase)
# Check the class of the tokenizer was properly saved (note that it always saves the slow class).
self.assertEqual(config['tokenizer_class'] , 'BertTokenizer')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
try:
AutoConfig.register('custom' , _UpperCAmelCase)
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCAmelCase):
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase)
__A : Optional[Any] = CustomTokenizer.from_pretrained(_UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : int = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
try:
AutoConfig.register('custom' , _UpperCAmelCase)
# Can register in two steps
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, None))
AutoTokenizer.register(_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
del TOKENIZER_MAPPING._extra_content[CustomConfig]
# Can register in one step
AutoTokenizer.register(
_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase)
self.assertEqual(TOKENIZER_MAPPING[CustomConfig] , (CustomTokenizer, CustomTokenizerFast))
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_UpperCAmelCase):
AutoTokenizer.register(_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase)
        # We pass through a bert tokenizer fast because there is no slow-to-fast converter for our new tokenizer
        # and that model does not have a tokenizer.json
with tempfile.TemporaryDirectory() as tmp_dir:
__A : Optional[int] = BertTokenizerFast.from_pretrained(_UpperCAmelCase)
bert_tokenizer.save_pretrained(_UpperCAmelCase)
__A : Dict = CustomTokenizerFast.from_pretrained(_UpperCAmelCase)
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Union[str, Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
__A : Any = AutoTokenizer.from_pretrained(_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaises(_UpperCAmelCase):
__A : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer')
# If remote code is disabled, we can't load this config.
with self.assertRaises(_UpperCAmelCase):
__A : Dict = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase)
__A : str = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Dict = AutoTokenizer.from_pretrained(_UpperCAmelCase , trust_remote_code=_UpperCAmelCase)
self.assertTrue(reloaded_tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizerFast')
# Test we can also load the slow version
__A : Union[str, Any] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
# Test tokenizer can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase)
__A : Union[str, Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertTrue(reloaded_tokenizer.special_attribute_present)
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertEqual(reloaded_tokenizer.__class__.__name__ , 'NewTokenizer')
@require_tokenizers
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = False
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = NewTokenizer
lowerCAmelCase = False
try:
AutoConfig.register('custom' , _UpperCAmelCase)
AutoTokenizer.register(_UpperCAmelCase , slow_tokenizer_class=_UpperCAmelCase)
AutoTokenizer.register(_UpperCAmelCase , fast_tokenizer_class=_UpperCAmelCase)
# If remote code is not set, the default is to use local
__A : List[Any] = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer')
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertFalse(tokenizer.special_attribute_present)
__A : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/test_dynamic_tokenizer' , use_fast=_UpperCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertFalse(tokenizer.special_attribute_present)
# If remote code is disabled, we load the local one.
__A : Optional[Any] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertFalse(tokenizer.special_attribute_present)
__A : Any = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertFalse(tokenizer.special_attribute_present)
# If remote is enabled, we load from the Hub
__A : int = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
self.assertTrue(tokenizer.special_attribute_present)
__A : Optional[Any] = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer' , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
self.assertTrue(tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_UpperCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast')
# Test we can also load the slow version
__A : int = AutoTokenizer.from_pretrained(
'hf-internal-testing/test_dynamic_tokenizer_legacy' , trust_remote_code=_UpperCAmelCase , use_fast=_UpperCAmelCase)
self.assertTrue(tokenizer.special_attribute_present)
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase , 'bert-base is not a local folder and is not a valid model identifier'):
__A : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
with self.assertRaisesRegex(
_UpperCAmelCase , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
__A : Union[str, Any] = AutoTokenizer.from_pretrained(_UpperCAmelCase , revision='aaaaaa')
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
with RequestCounter() as counter:
__A : Union[str, Any] = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
self.assertEqual(counter.get_request_count , 0)
self.assertEqual(counter.head_request_count , 1)
self.assertEqual(counter.other_request_count , 0)
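# Condensed sketch of the registration pattern the tests above exercise
# (CustomConfig/CustomTokenizer come from the test_module imports at the top
# of this file; running this outside a test would mutate the global
# AutoConfig/AutoTokenizer mappings, so it is left as comments):
# AutoConfig.register("custom", CustomConfig)
# AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
# tokenizer = AutoTokenizer.from_pretrained(checkpoint_dir)  # resolves to CustomTokenizer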
| 190
| 1
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    """simple docstring"""
    embed = []
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight',
f'stage{idx}.patch_embed.proj.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias',
f'stage{idx}.patch_embed.proj.bias',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight',
f'stage{idx}.patch_embed.norm.weight',
) )
embed.append(
(
f'cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias',
f'stage{idx}.patch_embed.norm.bias',
) )
return embed
def attention(idx, cnt):
    """simple docstring"""
    attention_weights = []
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked',
f'stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_q.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_q.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_k.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_k.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight',
f'stage{idx}.blocks.{cnt}.attn.proj_v.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias',
f'stage{idx}.blocks.{cnt}.attn.proj_v.bias',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight',
f'stage{idx}.blocks.{cnt}.attn.proj.weight',
) )
attention_weights.append(
(
f'cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias',
f'stage{idx}.blocks.{cnt}.attn.proj.bias',
) )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight', f'stage{idx}.blocks.{cnt}.mlp.fc2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias', f'stage{idx}.blocks.{cnt}.mlp.fc2.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight', f'stage{idx}.blocks.{cnt}.norm1.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias', f'stage{idx}.blocks.{cnt}.norm1.bias') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight', f'stage{idx}.blocks.{cnt}.norm2.weight') )
attention_weights.append(
(f'cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias', f'stage{idx}.blocks.{cnt}.norm2.bias') )
return attention_weights
def cls_token(idx):
    """simple docstring"""
    token = []
token.append((f'cvt.encoder.stages.{idx}.cls_token', 'stage2.cls_token') )
return token
def final():
    """simple docstring"""
    head = []
head.append(('layernorm.weight', 'norm.weight') )
head.append(('layernorm.bias', 'norm.bias') )
head.append(('classifier.weight', 'head.weight') )
head.append(('classifier.bias', 'head.bias') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """simple docstring"""
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
a__ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--cvt_model",
default="cvt-w24",
type=str,
help="Name of the cvt model you'd like to convert.",
)
parser.add_argument(
"--image_size",
default=3_8_4,
type=int,
help="Input Image Size",
)
parser.add_argument(
"--cvt_file_name",
default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth",
type=str,
help="Input Image Size",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
a__ : str = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
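# Example invocation (hypothetical local paths; the checkpoint file follows the
# naming used in the weight-zoo link above):
# python <this_script>.py --cvt_model cvt-13 --image_size 384 \
#     --cvt_file_name cvtmodels/CvT-13-384x384-IN-22k.pth \
#     --pytorch_dump_folder_path ./cvt-13-384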
| 161
|
'''simple docstring'''
import functools
def minimum_tickets_cost(days: list, costs: list) -> int:
    """simple docstring"""
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError('The parameter days should be a list of integers')
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError('The parameter costs should be a list of three integers')
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError('All days elements should be greater than 0')
    if max(days) >= 366:
        raise ValueError('All days elements should be less than 366')
    days_set = set(days)
    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )
    return dynamic_programming(1)
if __name__ == "__main__":
import doctest
doctest.testmod()
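    # Worked instance (classic "minimum cost for tickets" example): a 7-day
    # pass covering days 4-8 plus 1-day passes on days 1 and 20 costs
    # 7 + 2 + 2 = 11, which is optimal here.
    assert minimum_tickets_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11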
| 161
| 1
|
'''simple docstring'''
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    '''simple docstring'''
    def __init__(self):
        self.img = ''
        self.original_image = ''
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label='x')
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite('output_data/output.jpg', self.img)
    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])
    def show_image(self):
        cv2.imshow('Output-Image', self.img)
        cv2.imshow('Input-Image', self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
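    # Worked micro-example of the mapping in stretch() (assumption: a 4-level
    # image, L = 4): histogram [2, 2, 0, 0] gives cumulative sums sk = 0.5 and
    # 1.0, so input levels 0 and 1 map to levels 2 and 3, stretching the
    # occupied levels toward the full dynamic range.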
| 228
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method(
    coefficient_matrix: NDArray[float64],
    constant_matrix: NDArray[float64],
    init_val: list[float],
    iterations: int,
) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"""Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"""
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"""Constant matrix must be nx1 but received {rows2}x{cols2}"""
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            '''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
            f"""received {rows1}x{cols1} and {rows2}x{cols2}"""
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            '''Number of initial values must be equal to number of rows in coefficient '''
            f"""matrix but received {len(init_val)} and {rows1}"""
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError('''Iterations must be at least 1''')
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(rows):
        total = 0
        for j in range(cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('''Coefficient matrix is not strictly diagonally dominant''')
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
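    # Small worked instance (names as restored above): the strictly diagonally
    # dominant system 2x + y = 3, x + 2y = 3 has solution x = y = 1, and ten
    # Jacobi sweeps from [0, 0] land very close to it.
    coefficient = np.array([[2.0, 1.0], [1.0, 2.0]])
    constant = np.array([[3.0], [3.0]])
    print(jacobi_iteration_method(coefficient, constant, [0.0, 0.0], 10))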
| 228
| 1
|
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
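    # Quick numeric check (water at ~20 °C, assuming bulk modulus K ≈ 2.15 GPa
    # and density ρ ≈ 998 kg/m³): sqrt(K / ρ) ≈ 1467.7 m/s.
    print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))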
| 63
|
A : Union[str, Any] = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
A : List[Any] = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Order vertices by depth-first-search finish time."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS finish order on the graph, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
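

if __name__ == "__main__":
    # Illustrative run on the sample graphs defined at the top of this snippet;
    # test_graph_1 should yield [[0, 1, 2], [3], [4]].
    print(strongly_connected_components(test_graph_1))
    print(strongly_connected_components(test_graph_2))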
| 305
| 0
|
import argparse
import shlex
import runhouse as rh
if __name__ == "__main__":
# Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access
# setup instructions, if using on-demand hardware
# If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster
# If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster
# Throw an error if user passes both BYO and on-demand cluster args
# Otherwise, use default values
    parser = argparse.ArgumentParser()
parser.add_argument('--user', type=str, default='ubuntu')
parser.add_argument('--host', type=str, default='localhost')
parser.add_argument('--key_path', type=str, default=None)
parser.add_argument('--instance', type=str, default='V100:1')
parser.add_argument('--provider', type=str, default='cheapest')
parser.add_argument('--use_spot', type=bool, default=False)
parser.add_argument('--example', type=str, default='pytorch/text-generation/run_generation.py')
    args, unknown = parser.parse_known_args()
if args.host != "localhost":
if args.instance != "V100:1" or args.provider != "cheapest":
raise ValueError('Cannot specify both BYO and on-demand cluster args')
        cluster = rh.cluster(
            name='rh-cluster', ips=[args.host], ssh_creds={'ssh_user': args.user, 'ssh_private_key': args.key_path}
        )
else:
        cluster = rh.cluster(
            name='rh-cluster', instance_type=args.instance, provider=args.provider, use_spot=args.use_spot
        )
    example_dir = args.example.rsplit('/', 1)[0]
# Set up remote environment
cluster.install_packages(['pip:./']) # Installs transformers from local source
# Note transformers is copied into the home directory on the remote machine, so we can install from there
cluster.run([F'pip install -r transformers/examples/{example_dir}/requirements.txt'])
cluster.run(['pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117'])
# Run example. You can bypass the CLI wrapper and paste your own code here.
cluster.run([F'python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}'])
# Alternatively, we can just import and run a training function (especially if there's no wrapper CLI):
# from my_script... import train
# reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard']
# launch_train_gpu = rh.function(fn=train,
# system=gpu,
# reqs=reqs,
# name='train_bert_glue')
#
# We can pass in arguments just like we would to a function:
# launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16
# stream_logs=True)
| 120
|
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Salesforce/instruct-blip-flan-t5': 'https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json',
}


class InstructBlipVisionConfig(PretrainedConfig):
    model_type = 'instructblip_vision_model'

    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act='gelu', layer_norm_eps=1e-6, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == 'instructblip':
            config_dict = config_dict['vision_config']
        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = 'instructblip_qformer'

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type='absolute', cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> 'PretrainedConfig':
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get('model_type') == 'instructblip':
            config_dict = config_dict['qformer_config']
        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.')
        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    model_type = 'instructblip'
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the InstructBlipVisionConfig with default values.')
        if qformer_config is None:
            qformer_config = {}
            logger.info('qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.')
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).')
        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config['model_type'] if 'model_type' in text_config else 'opt'
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config: InstructBlipVisionConfig, qformer_config: InstructBlipQFormerConfig, text_config: PretrainedConfig, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['qformer_config'] = self.qformer_config.to_dict()
        output['text_config'] = self.text_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
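

if __name__ == "__main__":
    # Minimal usage sketch (guard added here for illustration): build a composed
    # config from sub-config defaults and round-trip it through to_dict().
    config = InstructBlipConfig()
    print(config.to_dict()["model_type"])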
| 120
| 1
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim

        self.in_channels = in_channels

        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)

        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine)
                for d in range(num_layers)
            ])

        self.proj_out = nn.Linear(inner_dim, in_channels)

    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True):
        # 1. Input: fold the frame axis out of the batch so attention runs over time
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames

        residual = hidden_states

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)

        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

        hidden_states = self.proj_in(hidden_states)

        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels)

        # 3. Output: restore the (batch * frames, channel, height, width) layout
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, channel, num_frames)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)

        output = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=output)
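

if __name__ == "__main__":
    # Quick shape check (illustrative sizes chosen here, not part of the original file):
    model = TransformerTemporalModel(num_attention_heads=2, attention_head_dim=8, in_channels=16, norm_num_groups=4)
    frames = torch.randn(2 * 4, 16, 8, 8)  # 2 clips of 4 frames, 16 channels, 8x8 spatial
    out = model(frames, num_frames=4).sample
    assert out.shape == frames.shape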
| 212
|
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase__ = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = AlbertTokenizer
lowercase = AlbertTokenizerFast
lowercase = True
lowercase = True
lowercase = True
def _lowerCamelCase ( self : int ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
lowerCAmelCase__ : int = AlbertTokenizer(a )
tokenizer.save_pretrained(self.tmpdirname )
def _lowerCamelCase ( self : List[str] , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Any = 'this is a test'
lowerCAmelCase__ : List[Any] = 'this is a test'
return input_text, output_text
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = '<pad>'
lowerCAmelCase__ : Optional[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<pad>' )
self.assertEqual(vocab_keys[1] , '<unk>' )
self.assertEqual(vocab_keys[-1] , '▁eloquent' )
self.assertEqual(len(a ) , 30_000 )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
lowerCAmelCase__ : str = self.get_tokenizer()
lowerCAmelCase__ : str = self.get_rust_tokenizer()
lowerCAmelCase__ : List[Any] = 'I was born in 92000, and this is falsé.'
lowerCAmelCase__ : str = tokenizer.tokenize(a )
lowerCAmelCase__ : Optional[int] = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
lowerCAmelCase__ : Tuple = tokenizer.encode(a , add_special_tokens=a )
lowerCAmelCase__ : Union[str, Any] = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
lowerCAmelCase__ : Optional[Any] = self.get_rust_tokenizer()
lowerCAmelCase__ : Dict = tokenizer.encode(a )
lowerCAmelCase__ : List[Any] = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = AlbertTokenizer(a , keep_accents=a )
lowerCAmelCase__ : Union[str, Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(a , ['▁this', '▁is', '▁a', '▁test'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [48, 25, 21, 1_289] )
lowerCAmelCase__ : Tuple = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
a , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.'] )
lowerCAmelCase__ : Any = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(a , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )
lowerCAmelCase__ : Any = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.'] , )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = AlbertTokenizer(a )
lowerCAmelCase__ : Tuple = tokenizer.encode('sequence builders' )
lowerCAmelCase__ : Any = tokenizer.encode('multi-sequence build' )
lowerCAmelCase__ : Dict = tokenizer.build_inputs_with_special_tokens(a )
lowerCAmelCase__ : Tuple = tokenizer.build_inputs_with_special_tokens(a , a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = {'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'input_ids': [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='albert-base-v2' , revision='6b6560eaf5ff2e250b00c50f380c5389a9c2d82e' , )
| 212
| 1
|
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def test_prims_mst() -> None:
    num_nodes, num_edges = 9, 14  # noqa: F841
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    adjacency = defaultdict(list)
    for node1, node2, cost in edges:
        adjacency[node1].append([node2, cost])
        adjacency[node2].append([node1, cost])

    result = mst(adjacency)

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    for answer in expected:
        edge = tuple(answer[:2])
        reverse = tuple(edge[::-1])
        assert edge in result or reverse in result
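

if __name__ == "__main__":
    # Runner added for illustration; the assertions above raise on failure.
    test_prims_mst()
    print("Prim's MST test passed")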
| 371
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""
            --nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""
            --nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 288
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = 'maskformer-swin'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act='gelu', use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names)
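

if __name__ == "__main__":
    # Minimal usage sketch (guard and values added here for illustration): the
    # defaults mirror Swin-Tiny, so hidden_size resolves to 96 * 2**3 = 768.
    config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
    print(config.hidden_size, config.stage_names)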
| 190
|
'''simple docstring'''
deps = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
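
# Minimal usage sketch (helper assumed here for illustration): pinned tables like
# this are typically rendered into pip requirement strings by package name.
def deps_list(*pkgs: str) -> list:
    return [deps[pkg] for pkg in pkgs]

# deps_list("torch", "numpy") -> ["torch>=1.4", "numpy"]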
| 190
| 1
|
"""simple docstring"""
import os
import posixpath
import shutil  # used below when moving shards out of the working dir
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
lowerCAmelCase__ = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __snake_case ( datasets.BuilderConfig):
snake_case__ : Optional[datasets.Features] = None
def snake_case_ ( A_ : "pyspark.sql.DataFrame", A_ : List[int], ):
'''simple docstring'''
import pyspark
def generate_fn():
_lowerCamelCase : int = df.select('''*''', pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) )
for partition_id in partition_order:
_lowerCamelCase : Any = df_with_partition_id.select('''*''' ).where(F'''part_id = {partition_id}''' ).drop('''part_id''' )
_lowerCamelCase : Optional[int] = partition_df.collect()
_lowerCamelCase : List[str] = 0
for row in rows:
yield F'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class __snake_case ( _BaseExamplesIterable):
def __init__( self : Tuple , __lowerCAmelCase : "pyspark.sql.DataFrame" , __lowerCAmelCase : Optional[int]=None , ):
"""simple docstring"""
_lowerCamelCase : Dict = df
_lowerCamelCase : Union[str, Any] = partition_order or range(self.df.rdd.getNumPartitions() )
_lowerCamelCase : Dict = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : List[Any] ):
"""simple docstring"""
yield from self.generate_examples_fn()
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : np.random.Generator ):
"""simple docstring"""
_lowerCamelCase : Optional[Any] = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(__lowerCAmelCase )
return SparkExamplesIterable(self.df , partition_order=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __lowerCAmelCase : int , __lowerCAmelCase : int ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.split_shard_indices_by_worker(__lowerCAmelCase , __lowerCAmelCase )
return SparkExamplesIterable(self.df , partition_order=__lowerCAmelCase )
@property
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return len(self.partition_order )
class __snake_case ( datasets.DatasetBuilder):
snake_case__ : List[Any] = SparkConfig
def __init__( self : Union[str, Any] , __lowerCAmelCase : "pyspark.sql.DataFrame" , __lowerCAmelCase : str = None , __lowerCAmelCase : str = None , **__lowerCAmelCase : int , ):
"""simple docstring"""
import pyspark
_lowerCamelCase : Optional[int] = pyspark.sql.SparkSession.builder.getOrCreate()
_lowerCamelCase : int = df
_lowerCamelCase : Any = working_dir
super().__init__(
cache_dir=__lowerCAmelCase , config_name=str(self.df.semanticHash() ) , **__lowerCAmelCase , )
def SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
def create_cache_and_write_probe(__lowerCAmelCase : Optional[int] ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=__lowerCAmelCase )
            probe_file = os.path.join(self._cache_dir, "fs_test" + uuid.uuid4().hex)
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(__lowerCAmelCase , '''a''' )
return [probe_file]
if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
_lowerCamelCase : Optional[Any] = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__lowerCAmelCase ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
'''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' )
def SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : datasets.download.download_manager.DownloadManager ):
"""simple docstring"""
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
import pyspark
def get_arrow_batch_size(__lowerCAmelCase : Dict ):
for batch in it:
yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} )
_lowerCamelCase : Any = self.df.count()
_lowerCamelCase : Union[str, Any] = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
_lowerCamelCase : List[Any] = (
self.df.limit(__lowerCAmelCase )
.repartition(1 )
.mapInArrow(__lowerCAmelCase , '''batch_bytes: long''' )
.agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
_lowerCamelCase : Dict = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
_lowerCamelCase : List[str] = min(__lowerCAmelCase , int(approx_total_size / max_shard_size ) )
_lowerCamelCase : Optional[int] = self.df.repartition(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : int , ):
"""simple docstring"""
import pyspark
_lowerCamelCase : Optional[Any] = ParquetWriter if file_format == '''parquet''' else ArrowWriter
_lowerCamelCase : List[Any] = os.path.join(self._working_dir , os.path.basename(__lowerCAmelCase ) ) if self._working_dir else fpath
_lowerCamelCase : Dict = file_format == '''parquet'''
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
_lowerCamelCase : str = self.config.features
_lowerCamelCase : Dict = self._writer_batch_size
_lowerCamelCase : List[str] = self._fs.storage_options
        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
_lowerCamelCase : List[Any] = 0
_lowerCamelCase : Optional[int] = writer_class(
features=__lowerCAmelCase , path=working_fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , writer_batch_size=__lowerCAmelCase , storage_options=__lowerCAmelCase , embed_local_files=__lowerCAmelCase , )
_lowerCamelCase : int = pa.Table.from_batches([first_batch] )
writer.write_table(__lowerCAmelCase )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
_lowerCamelCase , _lowerCamelCase : Any = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
shard_id += 1
_lowerCamelCase : Optional[int] = writer_class(
features=writer._features , path=working_fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , writer_batch_size=__lowerCAmelCase , storage_options=__lowerCAmelCase , embed_local_files=__lowerCAmelCase , )
_lowerCamelCase : Optional[int] = pa.Table.from_batches([batch] )
writer.write_table(__lowerCAmelCase )
if writer._num_bytes > 0:
_lowerCamelCase , _lowerCamelCase : Optional[int] = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(__lowerCAmelCase ) ):
_lowerCamelCase : Optional[Any] = os.path.join(os.path.dirname(__lowerCAmelCase ) , os.path.basename(__lowerCAmelCase ) )
shutil.move(__lowerCAmelCase , __lowerCAmelCase )
_lowerCamelCase : List[Any] = (
self.df.mapInArrow(__lowerCAmelCase , '''task_id: long, num_examples: long, num_bytes: long''' )
.groupBy('''task_id''' )
.agg(
pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE ( self : Tuple , __lowerCAmelCase : "datasets.SplitGenerator" , __lowerCAmelCase : str = "arrow" , __lowerCAmelCase : Optional[Union[str, int]] = None , __lowerCAmelCase : Optional[int] = None , **__lowerCAmelCase : Tuple , ):
"""simple docstring"""
self._validate_cache_dir()
_lowerCamelCase : str = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(__lowerCAmelCase )
_lowerCamelCase : str = not is_remote_filesystem(self._fs )
_lowerCamelCase : Tuple = os.path.join if is_local else posixpath.join
_lowerCamelCase : int = '''-TTTTT-SSSSS-of-NNNNN'''
_lowerCamelCase : Tuple = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
_lowerCamelCase : List[Any] = path_join(self._output_dir , __lowerCAmelCase )
_lowerCamelCase : List[Any] = 0
_lowerCamelCase : Any = 0
_lowerCamelCase : str = 0
_lowerCamelCase : int = []
_lowerCamelCase : List[str] = []
for task_id, content in self._prepare_split_single(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
            num_examples, num_bytes, num_shards, shard_lengths = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(__lowerCAmelCase )
_lowerCamelCase : int = total_num_examples
_lowerCamelCase : str = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
_lowerCamelCase : Optional[Any] = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
_lowerCamelCase : str = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
__lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , ):
rename(
__lowerCAmelCase , fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace('''TTTTT-SSSSS''' , f'''{global_shard_id:05d}''' ).replace('''NNNNN''' , f'''{total_shards:05d}''' ) , )
_lowerCamelCase : Union[str, Any] = []
_lowerCamelCase : Any = 0
for i in range(len(__lowerCAmelCase ) ):
_lowerCamelCase , _lowerCamelCase : Dict = task_id_and_num_shards[i]
for shard_id in range(__lowerCAmelCase ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(__lowerCAmelCase , len(__lowerCAmelCase ) ).map(lambda __lowerCAmelCase : _rename_shard(*__lowerCAmelCase ) ).collect()
else:
# don't use any pattern
_lowerCamelCase : Any = 0
_lowerCamelCase : List[str] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace(__lowerCAmelCase , '''''' ) , )
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : "datasets.SplitGenerator" , ):
"""simple docstring"""
return SparkExamplesIterable(self.df )
| 175
|
"""simple docstring"""
def binary_insertion_sort(collection: list) -> list:
    """Sort in place, using binary search to locate each insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(binary_insertion_sort(unsorted))
| 175
| 1
|
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = MobileBertTokenizer
UpperCamelCase__ = MobileBertTokenizerFast
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = filter_non_english
UpperCamelCase__ = '''google/mobilebert-uncased'''
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
super().setUp()
a = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
a = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :Optional[Any] ):
'''simple docstring'''
a = """UNwant\u00E9d,running"""
a = """unwanted, running"""
return input_text, output_text
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = self.tokenizer_class(self.vocab_file )
a = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(__magic_name__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , [9, 6, 7, 12, 10, 11] )
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = """UNwant\u00E9d,running"""
a = tokenizer.tokenize(__magic_name__ )
a = rust_tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = rust_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
a = self.get_rust_tokenizer()
a = tokenizer.encode(__magic_name__ )
a = rust_tokenizer.encode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
# With lower casing
a = self.get_tokenizer(do_lower_case=__magic_name__ )
a = self.get_rust_tokenizer(do_lower_case=__magic_name__ )
a = """UNwant\u00E9d,running"""
a = tokenizer.tokenize(__magic_name__ )
a = rust_tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
a = tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = rust_tokenizer.encode(__magic_name__ , add_special_tokens=__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
a = self.get_rust_tokenizer()
a = tokenizer.encode(__magic_name__ )
a = rust_tokenizer.encode(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
a = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = BasicTokenizer(do_lower_case=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
a = BasicTokenizer(do_lower_case=__magic_name__ , strip_accents=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = BasicTokenizer(do_lower_case=__magic_name__ , strip_accents=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = BasicTokenizer(do_lower_case=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = BasicTokenizer(do_lower_case=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = BasicTokenizer(do_lower_case=__magic_name__ , strip_accents=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
a = BasicTokenizer(do_lower_case=__magic_name__ , strip_accents=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a = BasicTokenizer(do_lower_case=__magic_name__ , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
a = {}
for i, token in enumerate(__magic_name__ ):
a = i
a = WordpieceTokenizer(vocab=__magic_name__ , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def lowerCamelCase__ ( self :List[Any] ):
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def lowerCamelCase__ ( self :Optional[Any] ):
'''simple docstring'''
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__magic_name__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(__magic_name__ ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
a = tokenizer.encode("""sequence builders""" , add_special_tokens=__magic_name__ )
a = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__magic_name__ )
a = tokenizer.build_inputs_with_special_tokens(__magic_name__ )
a = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a = self.rust_tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
a = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
a = tokenizer_r.encode_plus(
__magic_name__ , return_attention_mask=__magic_name__ , return_token_type_ids=__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ , )
a = tokenizer_r.do_lower_case if hasattr(__magic_name__ , """do_lower_case""" ) else False
a = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
a = ["""的""", """人""", """有"""]
a = """""".join(__magic_name__ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
a = True
a = self.tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
a = self.rust_tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
a = tokenizer_p.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer_r.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer_r.convert_ids_to_tokens(__magic_name__ )
a = tokenizer_p.convert_ids_to_tokens(__magic_name__ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
a = False
a = self.rust_tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
a = self.tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
a = tokenizer_r.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer_p.encode(__magic_name__ , add_special_tokens=__magic_name__ )
a = tokenizer_r.convert_ids_to_tokens(__magic_name__ )
a = tokenizer_p.convert_ids_to_tokens(__magic_name__ )
# it is expected that only the first Chinese character is not preceded by "##".
a = [
F'##{token}' if idx != 0 else token for idx, token in enumerate(__magic_name__ )
]
self.assertListEqual(__magic_name__ , __magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
| 228
|
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because LightningModule requires it
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(longformer_model, longformer_question_answering_ckpt_path, pytorch_dump_folder_path):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f'Conversion successful. Model saved under {pytorch_dump_folder_path}')
if __name__ == "__main__":
__UpperCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__UpperCamelCase : List[Any] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
| 228
| 1
|
def euclidean_distance_sqr(point1, point2):
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0):
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")):
    # brute force over all pairs; used as the base case of the recursion
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")):
    # in the strip, only the 6 neighbours around each point can be closer
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts):
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts):
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
if __name__ == "__main__":
    points = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print('''Distance:''', closest_pair_of_points(points, len(points)))
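
    # Brute-force cross-check (sketch): O(n^2) over the sample points above,
    # using only the standard library and the functions defined in this file.
    from itertools import combinations

    brute_force = min(
        ((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2) ** 0.5 for a, b in combinations(points, 2)
    )
    assert abs(brute_force - closest_pair_of_points(points, len(points))) < 1e-9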
deps = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
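
# Sketch of how a pinned-version table like `deps` is typically consumed when
# assembling install requirements; `deps_list` is an illustrative helper, not
# necessarily this project's actual implementation.
def deps_list(*pkgs):
    return [deps[pkg] for pkg in pkgs]


install_requires = deps_list("torch", "numpy", "Pillow", "huggingface-hub")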
import glob
import os
import random
from string import ascii_lowercase, digits

import cv2

# Folder paths and flip direction; fill these in before running.
LABEL_DIR = ""
IMAGE_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)


def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)

    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))


def get_dataset(label_dir: str, img_dir: str) -> tuple:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list


def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
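
# Example (a sketch): a deliberately small config exercising
# _rope_scaling_validation; the sizes are arbitrary illustration values, not
# those of a released checkpoint.
_example_config = GPTNeoXConfig(
    hidden_size=512,
    num_attention_heads=8,
    num_hidden_layers=6,
    rope_scaling={"type": "linear", "factor": 2.0},
)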
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias


def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Copy/paste/tweak the model's weights to the Transformers design."""
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, model_type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
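
# Example invocation (a sketch; the script filename is an assumption and the
# output path is a placeholder):
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path ./blip2-opt-2.7b-converted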
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class MaskedBertConfig(PretrainedConfig):
    """A class replicating `~transformers.BertConfig` with additional parameters for pruning/masking."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
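
# Example (a sketch): instantiating the config with magnitude-based "topK"
# pruning; the values simply mirror the defaults above.
_example_config = MaskedBertConfig(pruning_method="topK", mask_init="constant", mask_scale=0.0)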
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor


def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config


def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict


def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)

    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]]
        )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]]
        )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]]
        )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 512, 512])
        expected_slice = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]]
        )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        expected_shape = torch.Size([1, 3, 1024, 1024])
        expected_slice = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]]
        )

    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3], expected_slice, atol=1e-3)
    print("Looks ok!")

    url_to_name = {
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
            "swin2SR-classical-sr-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
            "swin2SR-classical-sr-x4-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
            "swin2SR-compressed-sr-x4-48"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
            "swin2SR-lightweight-x2-64"
        ),
        "https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
            "swin2SR-realworld-sr-x4-64-bsrgan-psnr"
        ),
    }
    model_name = url_to_name[checkpoint_url]

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
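
# Example invocation (a sketch; the script filename is an assumption, the
# checkpoint URL comes from the table above):
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64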
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"deit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"deit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"deit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"deit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"deit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"deit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"deit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"deit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"deit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"deit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "deit.embeddings.cls_token"),
            ("dist_token", "deit.embeddings.distillation_token"),
            ("patch_embed.proj.weight", "deit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "deit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "deit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "deit" from all keys that start with "deit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("deit") else pair for pair in rename_keys]
    else:
        # layernorm + classification heads
        rename_keys.extend(
            [
                ("norm.weight", "deit.layernorm.weight"),
                ("norm.bias", "deit.layernorm.bias"),
                ("head.weight", "cls_classifier.weight"),
                ("head.bias", "cls_classifier.bias"),
                ("head_dist.weight", "distillation_classifier.weight"),
                ("head_dist.bias", "distillation_classifier.bias"),
            ]
        )

    return rename_keys


def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "deit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_deit_checkpoint(deit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak the model's weights to our DeiT structure."""
    # define default DeiT configuration
    config = DeiTConfig()
    # all deit models have fine-tuned heads
    base_model = False
    # dataset (fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    config.patch_size = int(deit_name[-6:-4])
    config.image_size = int(deit_name[-3:])
    # size of the architecture
    if deit_name[9:].startswith("tiny"):
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
    elif deit_name[9:].startswith("small"):
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    if deit_name[9:].startswith("base"):
        pass
    elif deit_name[4:].startswith("large"):
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(deit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    model = DeiTForImageClassificationWithTeacher(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by DeiTImageProcessor
    size = int(
        (256 / 224) * config.image_size
    )  # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103
    image_processor = DeiTImageProcessor(size=size, crop_size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {deit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--deit_name',
default='vit_deit_base_distilled_patch16_224',
type=str,
help='Name of the DeiT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
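
# Illustration (sketch) of the fused-qkv split performed by read_in_q_k_v:
# timm stores query/key/value as one (3 * hidden, hidden) matrix that is cut
# into three equal slices, in that order.
def _demo_qkv_split(hidden=4):
    qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
    q, k, v = qkv[:hidden, :], qkv[hidden : 2 * hidden, :], qkv[-hidden:, :]
    assert torch.equal(torch.cat([q, k, v]), qkv)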
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    # Return True if the sink can still be reached from the source.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
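
    # Sanity check (sketch): mincut mutates its graph argument, so rebuild the
    # capacities before measuring the returned cut edges; the summed capacity
    # of the cut should equal the max flow.
    original = [
        [0, 16, 13, 0, 0, 0],
        [0, 0, 10, 12, 0, 0],
        [0, 4, 0, 0, 14, 0],
        [0, 0, 9, 0, 0, 20],
        [0, 0, 0, 7, 0, 4],
        [0, 0, 0, 0, 0, 0],
    ]
    cut_edges = mincut([row[:] for row in original], source=0, sink=5)
    print("Min-cut capacity:", sum(original[u][v] for u, v in cut_edges))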
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(
    task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
):
    # Initialise PyTorch model.
    # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set
    # reset_position_index_per_cell of TapasConfig to False.

    # initialize configuration from json file
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell

    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f"Task {task} not supported.")

    print(f"Building PyTorch model from configuration: {config}")
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights and configuration)
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Save tokenizer files
    print(f"Save tokenizer files to {pytorch_dump_path}")
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)

    print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
        help='Whether to use relative position embeddings or not. Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
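
# Example invocation (a sketch; the script filename is an assumption and all
# paths are placeholders):
#   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path ./tapas_wtq/model.ckpt \
#       --tapas_config_file ./tapas_wtq/config.json \
#       --pytorch_dump_path ./tapas_wtq_pytorch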
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_efficientformer': [
        'EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'EfficientFormerConfig',
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_efficientformer'] = ['EfficientFormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_efficientformer'] = [
        'EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'EfficientFormerForImageClassification',
        'EfficientFormerForImageClassificationWithTeacher',
        'EfficientFormerModel',
        'EfficientFormerPreTrainedModel',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_efficientformer'] = [
        'TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFEfficientFormerForImageClassification',
        'TFEfficientFormerForImageClassificationWithTeacher',
        'TFEfficientFormerModel',
        'TFEfficientFormerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
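
# Minimal illustration of the lazy-import pattern used above (a sketch, not
# the real _LazyModule): attribute lookup triggers the underlying import only
# on first use.
class _LazyModuleSketch:
    def __init__(self, name, import_structure):
        self._name = name
        self._sketch_import_structure = import_structure

    def __getattr__(self, attr):
        import importlib

        for submodule, names in self._sketch_import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self._name}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)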
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def __lowercase ( lowerCamelCase : Dict , lowerCamelCase : int=False ):
try:
UpperCamelCase_ : Union[str, Any] = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCamelCase_ : List[str] = default
else:
# KEY is set, convert it to True or False.
try:
UpperCamelCase_ : Union[str, Any] = strtobool(lowerCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
a_ = parse_flag_from_env('RUN_SLOW', default=False)
def __lowercase ( lowerCamelCase : List[Any] ):
return unittest.skip('Test was skipped' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : int ):
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : str ):
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : Optional[Any] ):
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : Any ):
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : Any ):
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : str ):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : List[str] ):
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : str ):
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : Tuple ):
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : Tuple ):
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : Optional[Any] ):
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : List[Any] ):
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : int ):
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : Any ):
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : Tuple ):
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : List[Any]=None , lowerCamelCase : Optional[int]=None ):
if test_case is None:
return partial(lowerCamelCase , version=lowerCamelCase )
return unittest.skipUnless(is_torch_version('>=' , lowerCamelCase ) , F"test requires torch version >= {version}" )(lowerCamelCase )
def __lowercase ( lowerCamelCase : int ):
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : int ):
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(lowerCamelCase )
def __lowercase ( lowerCamelCase : Dict ):
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(lowerCamelCase )
a_ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def __lowercase ( lowerCamelCase : Dict ):
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(lowerCamelCase )
class _lowercase ( unittest.TestCase ):
lowercase = True
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : str = tempfile.mkdtemp()
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Any ) -> Union[str, Any]:
"""simple docstring"""
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def SCREAMING_SNAKE_CASE__ ( self : str ) -> List[str]:
"""simple docstring"""
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('**/*' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(snake_case )
class _lowercase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class _lowercase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : Union[mock.Mock, List[mock.Mock]] ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : str = mocks if isinstance(snake_case , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def __lowercase ( lowerCamelCase : Optional[Any] ):
UpperCamelCase_ : str = AcceleratorState()
UpperCamelCase_ : str = tensor[None].clone().to(state.device )
UpperCamelCase_ : List[Any] = gather(lowerCamelCase ).cpu()
UpperCamelCase_ : Tuple = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , lowerCamelCase ):
return False
return True
class _lowercase :
def __init__( self : Optional[int] , snake_case : Any , snake_case : List[Any] , snake_case : int ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : int = returncode
UpperCamelCase_ : Optional[int] = stdout
UpperCamelCase_ : Optional[int] = stderr
async def __lowercase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple ):
while True:
UpperCamelCase_ : Tuple = await stream.readline()
if line:
callback(lowerCamelCase )
else:
break
async def __lowercase ( lowerCamelCase : Dict , lowerCamelCase : Dict=None , lowerCamelCase : Optional[Any]=None , lowerCamelCase : List[str]=None , lowerCamelCase : Dict=False , lowerCamelCase : Tuple=False ):
if echo:
print('\nRunning: ' , ' '.join(lowerCamelCase ) )
UpperCamelCase_ : Optional[int] = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=lowerCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCamelCase_ : str = []
UpperCamelCase_ : Union[str, Any] = []
def tee(lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : Any , lowerCamelCase : List[str]="" ):
UpperCamelCase_ : int = line.decode('utf-8' ).rstrip()
sink.append(lowerCamelCase )
if not quiet:
print(lowerCamelCase , lowerCamelCase , file=lowerCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda lowerCamelCase : tee(lowerCamelCase , lowerCamelCase , sys.stdout , label='stdout:' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda lowerCamelCase : tee(lowerCamelCase , lowerCamelCase , sys.stderr , label='stderr:' ) ) ),
] , timeout=lowerCamelCase , )
return _RunOutput(await p.wait() , lowerCamelCase , lowerCamelCase )
def __lowercase ( lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any]=None , lowerCamelCase : int=None , lowerCamelCase : Any=180 , lowerCamelCase : Dict=False , lowerCamelCase : Optional[int]=True ):
UpperCamelCase_ : str = asyncio.get_event_loop()
UpperCamelCase_ : Union[str, Any] = loop.run_until_complete(
_stream_subprocess(lowerCamelCase , env=lowerCamelCase , stdin=lowerCamelCase , timeout=lowerCamelCase , quiet=lowerCamelCase , echo=lowerCamelCase ) )
UpperCamelCase_ : int = ' '.join(lowerCamelCase )
if result.returncode > 0:
UpperCamelCase_ : Dict = '\n'.join(result.stderr )
raise RuntimeError(
F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
F"The combined stderr from workers follows:\n{stderr}" )
return result
class _lowercase ( snake_case_ ):
pass
def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
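# --- Usage sketch (editorial addition): instantiating the config for a
# hypothetical environment with an 11-dim observation and 3-dim action space;
# unspecified fields keep the GPT-2-style defaults above.
#
#   config = DecisionTransformerConfig(state_dim=11, act_dim=3, max_ep_len=1000)
#   assert config.hidden_size == 128 and config.n_layer == 3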
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
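# --- Note (editorial addition): with the `_LazyModule` pattern above, the
# torch-backed symbols are only materialized on first attribute access:
#
#   from transformers.models.xmod import XmodConfig   # cheap: config only
#   from transformers.models.xmod import XmodModel    # triggers the modeling_xmod import
#
# If torch is not installed, the modeling names are simply absent from
# `_import_structure`, so the failure happens at symbol lookup rather than
# when the package itself is imported.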
import argparse
from collections import defaultdict
import yaml
_snake_case = "docs/source/en/_toctree.yml"
def clean_doc_toc(doc_list):
    """Cleans one section of the doc TOC: removes duplicate entries and sorts entries alphabetically by title."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The scheduler doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The pipeline doc part of the table of content is not properly sorted, run `make style` to fix this."
            )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
_snake_case = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
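# --- Usage sketch (editorial addition): run from the repository root,
# mirroring the transformers equivalent of this utility:
#
#   python utils/check_doc_toc.py                      # raise if the TOC is unsorted
#   python utils/check_doc_toc.py --fix_and_overwrite  # rewrite _toctree.yml in place
#
# (The file name `check_doc_toc.py` is the conventional one; adjust it to
# wherever this script actually lives in the repo.)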
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
torch.backends.cuda.matmul.allow_tf32 = False
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3, num_vq_embeddings=self.num_embed, vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4,
            num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs)
        return model
    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae, text_encoder=text_encoder, tokenizer=tokenizer, transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
'''simple docstring'''
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for i in my_list:
        buckets[int(i - min_value)].append(i)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
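# --- Note (editorial addition): this variant allocates one bucket per integer
# in [min_value, max_value], so it runs in O(n + k) time and space with
# k = max - min + 1, which degrades for sparse value ranges. Ordering inside a
# bucket comes from `sorted`, e.g.:
#
#   >>> bucket_sort([5, 5, 1, 1, 3])
#   [1, 1, 3, 5, 5]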
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=epoch,
            )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
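# --- Usage sketch (editorial addition, placeholder file name): typical
# invocations, assuming `accelerate config` has been run once on the machine:
#
#   accelerate launch tracking_example.py --with_tracking --project_dir logs
#   accelerate launch --mixed_precision fp16 tracking_example.py --with_tracking
#
# With `--with_tracking`, every tracker found in the environment (TensorBoard,
# W&B, ...) receives the per-epoch accuracy/f1/train_loss logged above.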
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
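# --- Usage sketch (editorial addition): merging keeps the result sorted
# because the constructor re-sorts everything it is given:
#
#   >>> str(merge_lists(SortedLinkedList((3, 1)), SortedLinkedList((4, 2))))
#   '1 -> 2 -> 3 -> 4'
#
# Note this runs in O((m + n) log(m + n)) rather than the O(m + n) of a true
# two-pointer merge, since the merged list is simply rebuilt from scratch.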
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check whether any of the given attribute variants is used in the model's source files."""
    attribute_used = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
                    attribute_used = True
if attribute_used:
break
if attribute_used:
break
    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    """Return the sorted list of config attributes that never appear in the corresponding modeling files."""
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Raise if any configuration class has attributes that are never used in the modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"

        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
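# --- Sanity sketch (editorial addition): the multi-line regex in
# `check_attribute_being_used` exists because a plain substring test misses
# calls that black splits across lines, e.g.:
#
#   getattr(
#       self.config,
#       "hidden_size",
#   )
#
# Both this form and the single-line `getattr(config, "hidden_size", ...)`
# count as the attribute being used.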
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})
    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config
    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
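# --- Usage sketch (editorial addition): the `update` round-trip exercised in
# `test_update` above, in miniature -- unknown kwargs are handed back to the
# caller instead of being silently dropped:
#
#   config = GenerationConfig(do_sample=True, temperature=0.7)
#   unused = config.update(max_new_tokens=256, foo="bar")
#   assert unused == {"foo": "bar"} and config.max_new_tokens == 256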
'''simple docstring'''
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(" ", "")
    second_str = second_str.replace(" ", "")

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)

    # For each character in the input strings,
    # increment the count in the corresponding counter
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    input_a = input("Enter the first string ").strip()
    input_b = input("Enter the second string ").strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
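# --- Note (editorial addition): the single defaultdict pass runs in O(n) time
# with at most one counter per distinct character, versus O(n log n) for the
# sort-both-strings approach. Case and spaces are ignored, e.g.:
#
#   >>> check_anagrams("Dormitory", "Dirty room")
#   True
#   >>> check_anagrams("hello", "world")
#   False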
import warnings
warnings.warn(
    "memory_utils has been reorganized to utils.memory. Import `find_executable_batch_size` from the main "
    "`__init__`: `from accelerate import find_executable_batch_size` to avoid this warning.",
    FutureWarning,
)
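# --- Usage sketch (editorial addition): the relocated helper is a decorator
# that halves the batch size and retries whenever the wrapped function hits a
# CUDA out-of-memory error:
#
#   from accelerate import find_executable_batch_size
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def train(batch_size):
#       ...  # build dataloaders/model with `batch_size` and run training
#
#   train()  # called with no args; the decorator injects `batch_size`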
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000,
            clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100,
            eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1,
            generator=generator, output_type="np",
        )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100,
            eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1,
            generator=generator, output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
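# --- Usage sketch (editorial addition): the integration test above as a
# standalone snippet; CycleDiffusion needs a DDIM scheduler with the
# stochastic `eta` term:
#
#   scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
#   pipe = CycleDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", scheduler=scheduler)
#   out = pipe(prompt="A blue colored car", source_prompt="A black colored car",
#              image=init_image, num_inference_steps=100, eta=0.1, strength=0.85,
#              guidance_scale=3, source_guidance_scale=1)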
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)
    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)
lowerCamelCase__ : Dict = F"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(F"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
@require_torch_gpu
def A_ ( self : Optional[Any] ) -> Optional[int]:
lowerCamelCase__ : List[str] = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
def A_ ( self : Any ) -> List[Any]:
lowerCamelCase__ : str = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_gpu
@require_ray
def A_ ( self : Optional[int] ) -> Optional[Any]:
lowerCamelCase__ : Union[str, Any] = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
@require_torch_multi_gpu
@require_ray
def A_ ( self : Dict ) -> List[str]:
lowerCamelCase__ : Tuple = self._run_finetune(gpus=1 , distributed_retriever='ray' )
self.assertGreaterEqual(result['test'][0]['test_avg_em'] , 0.2 )
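
# Usage sketch (illustration only): the tests above shell out to the training
# script; a roughly equivalent manual invocation looks like this (paths and
# port are placeholders):
#
#   python finetune_rag.py --data_dir ./data --output_dir ./output \
#       --model_name_or_path facebook/rag-sequence-base --model_type rag_sequence \
#       --do_train --do_predict --gpus 1 --distributed_retriever pytorch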
from ..utils import DummyObject, requires_backends


# Auto-generated dummy objects: each placeholder raises a helpful error when
# `torch` is not installed. The obfuscated source repeated the identical class
# below many times under a single clashing name (so only the last definition
# could survive at import time) and the individual object names are lost; the
# names used here are placeholders, and the original file defines one such
# dummy per torch-backed object.
class DummyTorchObject(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


# The source likewise repeats this module-level dummy function pattern.
def dummy_torch_function(*args, **kwargs):
    requires_backends(dummy_torch_function, ["torch"])
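
# Behavior sketch (illustration only; `DummyTorchObject` is the placeholder
# name introduced above): without torch installed, touching any dummy raises
# an informative error from `requires_backends`.
#   DummyTorchObject()                      -> raises, asking the user to install torch
#   DummyTorchObject.from_pretrained("x")   -> same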
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
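
# Usage sketch (illustration only, not part of the original module). The
# checkpoint and image URL below are just examples of a Hub image classifier
# and a publicly hosted test image.
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
    preds = classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=3)
    print(preds)  # [{"score": ..., "label": ...}, ...]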
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict to fp16, in place or to `save_path`."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
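
# Usage sketch (illustration only; the script file name is a placeholder):
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin
# Omitting --save_path overwrites the input checkpoint in place.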
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import Transformer2DModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class VQDiffusionPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def num_embed(self):
        return 12

    @property
    def num_embeds_ada_norm(self):
        return 12

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def dummy_vqvae(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
            num_vq_embeddings=self.num_embed,
            vq_embed_dim=3,
        )
        return model

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_transformer(self):
        torch.manual_seed(0)
        height = 12
        width = 12
        model_kwargs = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        model = Transformer2DModel(**model_kwargs)
        return model

    def test_vq_diffusion(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(learnable=False)

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_vq_diffusion_classifier_free_sampling(self):
        device = "cpu"

        vqvae = self.dummy_vqvae
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        transformer = self.dummy_transformer
        scheduler = VQDiffusionScheduler(self.num_embed)
        learned_classifier_free_sampling_embeddings = LearnedClassifierFreeSamplingEmbeddings(
            learnable=True, hidden_size=self.text_embedder_hidden_size, length=tokenizer.model_max_length
        )

        pipe = VQDiffusionPipeline(
            vqvae=vqvae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "teddy bear playing in the pool"

        generator = torch.Generator(device=device).manual_seed(0)
        output = pipe([prompt], generator=generator, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = pipe(
            [prompt], generator=generator, output_type="np", return_dict=False, num_inference_steps=2
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 24, 24, 3)

        expected_slice = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class VQDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_vq_diffusion_classifier_free_sampling(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy"
        )

        pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipeline(
            "teddy bear playing in the pool",
            num_images_per_prompt=1,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image).max() < 2.0
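
# Usage sketch (illustration only): running the released VQ-Diffusion
# checkpoint exercised by the slow test above; needs a CUDA device and
# network access.
if __name__ == "__main__":
    from diffusers import VQDiffusionPipeline

    pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq").to("cuda")
    generator = torch.Generator(device="cuda").manual_seed(0)
    image = pipe("teddy bear playing in the pool", generator=generator).images[0]
    image.save("teddy_bear.png")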
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # 1. Check inputs
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)
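
# Usage sketch (illustration only): this kind of file is typically loaded as a
# diffusers community pipeline; the checkpoint name, `custom_pipeline` id, and
# input file below are assumptions for the example.
if __name__ == "__main__":
    import PIL.Image
    from diffusers import DiffusionPipeline

    pipe = DiffusionPipeline.from_pretrained(
        "google/ddpm-ema-celebahq-256", custom_pipeline="ddim_noise_comparative_analysis"
    )
    init_image = PIL.Image.open("face.png")  # hypothetical 256x256 input image
    # noise the input up to 50% of the schedule, then denoise it again
    images, latent_timestep = pipe(image=init_image, strength=0.5, return_dict=False)
    images[0].save("reconstructed.png")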
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)


@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
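
# Usage sketch (illustration only): the push-to-hub flow exercised above,
# with placeholder repo names; requires a Hub token with write access.
# processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
# processor.push_to_hub("my-user/my-image-processor", use_auth_token="hf_...")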
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")

        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids

        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        # total (negative) log-likelihood of the labels: per-token loss times label length
        mtf_score = -(labels.shape[-1] * loss.item())

        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
    "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
    "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
    "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
    "funnel-transformer/intermediate": (
        "https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
    ),
    "funnel-transformer/intermediate-base": (
        "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
    ),
    "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
    "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
    "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
    "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}


class FunnelConfig(PretrainedConfig):
    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
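
# Usage sketch (illustration only): the derived properties are computed from
# `block_sizes`, so they are read-only and setting them raises.
if __name__ == "__main__":
    config = FunnelConfig(block_sizes=[2, 2])
    print(config.num_blocks)  # 2
    print(config.num_hidden_layers)  # 4, i.e. sum(block_sizes)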
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.inta,
'tensor(uint8)': np.uinta,
'tensor(int16)': np.intaa,
'tensor(uint16)': np.uintaa,
'tensor(int32)': np.intaa,
'tensor(uint32)': np.uintaa,
'tensor(int64)': np.intaa,
'tensor(uint64)': np.uintaa,
'tensor(float16)': np.floataa,
'tensor(float)': np.floataa,
'tensor(double)': np.floataa,
}
class OnnxRuntimeModel:
    '''simple docstring'''

    def __init__( self , model=None , **kwargs ):
        logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' )
        self.model = model
        self.model_save_dir = kwargs.get('model_save_dir' , None )
        self.latest_model_name = kwargs.get('latest_model_name' , ONNX_WEIGHTS_NAME )

    def __call__( self , **kwargs ):
        # onnxruntime expects numpy inputs and an optional list of output names (None = all outputs)
        inputs = {k: np.array(v ) for k, v in kwargs.items()}
        return self.model.run(None , inputs )

    @staticmethod
    def load_model( path: Union[str, Path] , provider=None , sess_options=None ):
        if provider is None:
            logger.info('No onnxruntime provider specified, using CPUExecutionProvider' )
            provider = 'CPUExecutionProvider'
        return ort.InferenceSession(path , providers=[provider] , sess_options=sess_options )

    def _save_pretrained( self , save_directory: Union[str, Path] , file_name: Optional[str] = None , **kwargs ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name )
        dst_path = Path(save_directory ).joinpath(model_file_name )
        try:
            shutil.copyfile(src_path , dst_path )
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
        if src_path.exists():
            dst_path = Path(save_directory ).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME )
            try:
                shutil.copyfile(src_path , dst_path )
            except shutil.SameFileError:
                pass

    def save_pretrained( self , save_directory: Union[str, os.PathLike] , **kwargs ):
        if os.path.isfile(save_directory ):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file" )
            return
        os.makedirs(save_directory , exist_ok=True )
        # saving model weights/files
        self._save_pretrained(save_directory , **kwargs )

    @classmethod
    def _from_pretrained( cls , model_id: Union[str, Path] , use_auth_token: Optional[Union[bool, str]] = None , revision: Optional[str] = None , force_download: bool = False , cache_dir: Optional[str] = None , file_name: Optional[str] = None , provider: Optional[str] = None , sess_options: Optional["ort.SessionOptions"] = None , **kwargs , ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id ):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id , model_file_name ) , provider=provider , sess_options=sess_options )
            kwargs['model_save_dir'] = Path(model_id )
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id , filename=model_file_name , use_auth_token=use_auth_token , revision=revision , cache_dir=cache_dir , force_download=force_download , )
            kwargs['model_save_dir'] = Path(model_cache_path ).parent
            kwargs['latest_model_name'] = Path(model_cache_path ).name
            model = OnnxRuntimeModel.load_model(model_cache_path , provider=provider , sess_options=sess_options )
        return cls(model=model , **kwargs )

    @classmethod
    def from_pretrained( cls , model_id: Union[str, Path] , force_download: bool = True , use_auth_token: Optional[str] = None , cache_dir: Optional[str] = None , **model_kwargs , ):
        revision = None
        if len(str(model_id ).split('@' ) ) == 2:
            model_id , revision = model_id.split('@' )
        return cls._from_pretrained(
            model_id=model_id , revision=revision , cache_dir=cache_dir , force_download=force_download , use_auth_token=use_auth_token , **model_kwargs , )
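# A minimal usage sketch (added; not part of the original module). The directory name
# and the input name "sample" are illustrative assumptions:
#     onnx_model = OnnxRuntimeModel.from_pretrained("./onnx_unet", provider="CPUExecutionProvider")
#     outputs = onnx_model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))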
| 29
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class OpenLlamaConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = "open-llama"

    def __init__( self , vocab_size=100_000 , hidden_size=4_096 , intermediate_size=11_008 , num_hidden_layers=32 , num_attention_heads=32 , hidden_act="silu" , max_position_embeddings=2_048 , initializer_range=0.02 , rms_norm_eps=1e-6 , use_cache=True , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , tie_word_embeddings=False , use_memory_efficient_attention=True , hidden_dropout_prob=0.1 , attention_dropout_prob=0.1 , use_stable_embedding=True , shared_input_output_embedding=True , rope_scaling=None , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # 'use_memorry_efficient_attention' (with the typo) is the key used by existing configs
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention" , use_memory_efficient_attention )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs , )

    def _rope_scaling_validation( self ):
        """simple docstring"""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get("type" , None )
        rope_scaling_factor = self.rope_scaling.get("factor" , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
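# A brief usage sketch (added; not part of the original module). The rope_scaling
# values are illustrative and exercise the validation above:
#     config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
#     assert config.num_hidden_layers == 32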
| 363
|
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int , absent: int , late: int ) -> int:
    '''simple docstring'''
    # a second absence, or a third consecutive late day, disqualifies the string
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1 , absent , late + 1 )

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1 , absent + 1 , 0 )

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1 , absent , 0 )

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30 ) -> int:
    '''simple docstring'''
    return _calculate(days , absent=0 , late=0 )
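# Hedged sanity check (added): the Project Euler 191 statement gives exactly 43 prize
# strings over a 4-day period.
assert solution(4) == 43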
if __name__ == "__main__":
print(solution())
| 191
| 0
|
"""simple docstring"""
from collections import deque
class Process:
    def __init__( self , process_name , arrival_time , burst_time ):
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__( self , number_of_queues , time_slices , queue , current_time , ):
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()
    def calculate_sequence_of_finish_queue( self ):
        sequence = []
        for i in range(len(self.finish_queue ) ):
            sequence.append(self.finish_queue[i].process_name )
        return sequence

    def calculate_waiting_time( self , queue ):
        waiting_times = []
        for i in range(len(queue ) ):
            waiting_times.append(queue[i].waiting_time )
        return waiting_times

    def calculate_turnaround_time( self , queue ):
        turnaround_times = []
        for i in range(len(queue ) ):
            turnaround_times.append(queue[i].turnaround_time )
        return turnaround_times

    def calculate_completion_time( self , queue ):
        completion_times = []
        for i in range(len(queue ) ):
            completion_times.append(queue[i].stop_time )
        return completion_times

    def calculate_remaining_burst_time_of_processes( self , queue ):
        return [q.burst_time for q in queue]

    def update_waiting_time( self , process ):
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served( self , ready_queue ):
        finished = deque()  # sequence deque of finished process
        while len(ready_queue ) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp )
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp )
        self.finish_queue.extend(finished )  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin( self , ready_queue , time_slice ):
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue ) ):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp )
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp )
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp )
        self.finish_queue.extend(finished )  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue( self ):
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1 ):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue , self.time_slices[i] )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue )
        return self.finish_queue
if __name__ == "__main__":
import doctest
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print sequence of finished processes
    print(
        f"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
    )
| 261
|
"""simple docstring"""
def is_pangram(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    frequency = set()
    # Replace all the whitespace in our sentence
    input_str = input_str.replace(" ", "")
    for alpha in input_str:
        if "a" <= alpha.lower() <= "z":
            frequency.add(alpha.lower() )
    return len(frequency ) == 26


def is_pangram_faster(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    flag = [False] * 26
    for char in input_str:
        if char.islower():
            flag[ord(char ) - 97] = True
        elif char.isupper():
            flag[ord(char ) - 65] = True
    return all(flag )


def is_pangram_fastest(
    input_str: str = "The quick brown fox jumps over the lazy dog",
) -> bool:
    return len({char for char in input_str.lower() if char.isalpha()} ) == 26


def benchmark() -> None:
    from timeit import timeit

    setup = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
    print(timeit("is_pangram()", setup=setup ) )
    print(timeit("is_pangram_faster()", setup=setup ) )
    print(timeit("is_pangram_fastest()", setup=setup ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
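# Hedged quick checks (added): the default sentence is a pangram; dropping the final
# word's "g" removes a letter and breaks the property.
assert is_pangram() and is_pangram_faster() and is_pangram_fastest()
assert not is_pangram("The quick brown fox jumps over the lazy do")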
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 45
| 0
|
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_token_type_ids=True , use_input_mask=True , use_labels=True , use_mc_token_ids=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def get_config( self ):
        """simple docstring"""
        return CTRLConfig(
            vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
    def create_and_check_ctrl_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        """simple docstring"""
        model = CTRLModel(config=config )
        model.to(torch_device )
        model.eval()

        model(input_ids , token_type_ids=token_type_ids , head_mask=head_mask )
        model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
    def create_and_check_lm_head_model( self , config , input_ids , input_mask , head_mask , token_type_ids , *args ):
        """simple docstring"""
        model = CTRLLMHeadModel(config )
        model.to(torch_device )
        model.eval()

        result = model(input_ids , token_type_ids=token_type_ids , labels=input_ids )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}

        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification( self , config , input_ids , head_mask , token_type_ids , *args ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        result = model(input_ids , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class CTRLModelTest(ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': CTRLModel,
            'text-classification': CTRLForSequenceClassification,
            'text-generation': CTRLLMHeadModel,
            'zero-shot': CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        """simple docstring"""
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = CTRLModelTester(self )
        self.config_tester = ConfigTester(self , config_class=CTRLConfig , n_embd=37 )
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_ctrl_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs )
    def test_ctrl_lm_head_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCamelCase ( self ) -> Optional[Any]:
"""simple docstring"""
pass
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip('The model doesn\'t support left padding' ) # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility( self ):
        """simple docstring"""
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase ):
    def tearDown( self ):
        """simple docstring"""
        super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
    def test_lm_generate_ctrl( self ):
        """simple docstring"""
        model = CTRLLMHeadModel.from_pretrained('ctrl' )
        model.to(torch_device )
        input_ids = torch.tensor(
            [[11_859, 0, 1_611, 8]] , dtype=torch.long , device=torch_device )  # Legal the president is
        expected_output_ids = [
11_859,
0,
1_611,
8,
5,
150,
26_449,
2,
19,
348,
469,
3,
2_595,
48,
20_740,
246_533,
246_533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 358
|
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter ):
    @staticmethod
    def _should_log( main_process_only ):
        """simple docstring"""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log( self , level , msg , *args , **kwargs ):
        """simple docstring"""
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
        main_process_only = kwargs.pop('main_process_only' , True )
        in_order = kwargs.pop('in_order' , False )

        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg , kwargs = self.process(msg , kwargs )
                self.logger.log(level , msg , *args , **kwargs )

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg , kwargs = self.process(msg , kwargs )
                        self.logger.log(level , msg , *args , **kwargs )
                    state.wait_for_everyone()
def get_logger( name , log_level=None ):
    '''simple docstring'''
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL' , None )
    logger = logging.getLogger(name )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(lowercase , {} )
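# A minimal usage sketch (added; not from the original module). It assumes an
# `Accelerator` has been constructed so that `PartialState` is initialized:
#     from accelerate import Accelerator
#     accelerator = Accelerator()
#     logger = get_logger(__name__, log_level="INFO")
#     logger.info("logged on the main process only")
#     logger.info("logged by every process, in order", main_process_only=False, in_order=True)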
| 110
| 0
|
"""simple docstring"""
def solution(limit: int = 1_000 ) -> int:
    """simple docstring"""
    return sum(e for e in range(3 , limit ) if e % 3 == 0 or e % 5 == 0 )
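# Hedged sanity check (added): below 10 the multiples of 3 or 5 are 3, 5, 6 and 9,
# which sum to 23.
assert solution(10) == 23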
if __name__ == "__main__":
print(f'{solution() = }')
| 320
|
from math import factorial
def solution(num: int = 100 ) -> int:
    return sum(int(x ) for x in str(factorial(num ) ) )
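# Hedged sanity check (added): 10! = 3628800, and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert solution(10) == 27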
if __name__ == "__main__":
print(solution(int(input('''Enter the Number: ''').strip())))
| 257
| 0
|
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    '''simple docstring'''

    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(tree: TreeNode | None ) -> bool:
    '''simple docstring'''

    def is_valid_tree(node: TreeNode | None ) -> bool:
        if node is None:
            return True

        if not isinstance(node , TreeNode ):
            return False

        try:
            float(node.data )
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left ) and is_valid_tree(node.right )

    if not is_valid_tree(tree ):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float." )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None , left_bound: float , right_bound: float ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left , left_bound , node.data )
            and is_binary_search_tree_recursive_check(
                node.right , node.data , right_bound )
        )

    return is_binary_search_tree_recursive_check(tree , -float("inf" ) , float("inf" ) )
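# A small worked example (added): a valid BST, then one whose left child violates the
# left-subtree < root invariant.
assert is_binary_search_tree(TreeNode(2.0 , TreeNode(1.0 ) , TreeNode(3.0 ) ) )
assert not is_binary_search_tree(TreeNode(2.0 , TreeNode(5.0 ) , TreeNode(3.0 ) ) )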
if __name__ == "__main__":
import doctest
doctest.testmod()
| 353
|
def binary_count_setbits(a: int ) -> int:
    '''simple docstring'''
    if a < 0:
        raise ValueError("Input value must be a positive integer" )
    elif isinstance(a , float ):
        raise TypeError("Input value must be a 'int' type" )
    return bin(a ).count("1" )
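# Hedged examples (added): 25 is 0b11001, so three set bits; zero has none.
assert binary_count_setbits(25) == 3
assert binary_count_setbits(0) == 0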
if __name__ == "__main__":
import doctest
doctest.testmod()
| 59
| 0
|
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray , mat_b: np.ndarray , mat_c: np.ndarray , pseudo_inv: np.ndarray = None , ) -> np.ndarray:
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg )

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg )

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement." )

    return mat_c - mat_b.T @ a_inv @ mat_b
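# A tiny worked example (added): for the block matrix [[A, B], [B^T, C]] with
# A = [[2]], B = [[1]], C = [[3]], the Schur complement is C - B^T A^{-1} B = [[2.5]].
assert np.isclose(
    schur_complement(np.array([[2.0]] ) , np.array([[1.0]] ) , np.array([[3.0]] ) )[0, 0] , 2.5 )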
class TestSchurComplement(unittest.TestCase ):
    def test_schur_complement(self ) -> None:
        '''simple docstring'''
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a , b , c )

        input_matrix = np.block([[a, b], [b.T, c]] )

        det_x = np.linalg.det(input_matrix )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )

        self.assertAlmostEqual(det_x , det_a * det_s )

    def test_improper_a_b_dimensions(self ) -> None:
        '''simple docstring'''
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )

        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )

    def test_improper_b_c_dimensions(self ) -> None:
        '''simple docstring'''
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )

        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 6
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    '''simple docstring'''

    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list )
    handles: list = field(default_factory=list )

    def _forward_hook( self , m , inputs: Tensor , outputs: Tensor ):
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m , nn.Conv2d ) or isinstance(m , nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )

    def __call__( self , x: Tensor ):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook ) )
        self.module(x )
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized( self ):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class ModuleTransfer:
    '''simple docstring'''

    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list )
    dest_skip: List = field(default_factory=list )

    def __call__( self , x: Tensor ):
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized

        src_traced = list(filter(lambda x : type(x ) not in self.src_skip , src_traced ) )
        dest_traced = list(filter(lambda x : type(x ) not in self.dest_skip , dest_traced ) )

        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced )} operations while'
                f' destination module has {len(dest_traced )}.' )

        for dest_m, src_m in zip(dest_traced , src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f'Transfered from={src_m} to={dest_m}' )
def convert_weight_and_push(name: str , config: ResNetConfig , save_directory: Path , push_to_hub: bool = True ):
    print(f'Converting {name}...' )
    with torch.no_grad():
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )

    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."

    checkpoint_name = f'resnet{"-".join(name.split("resnet" ) )}'
    print(checkpoint_name )

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=True , )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=True , )

        print(f'Pushed {checkpoint_name}' )
def convert_weights_and_push(save_directory: Path , model_name: str = None , push_to_hub: bool = True ):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config = {
"""resnet18""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet26""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet34""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet50""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet101""": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet152""": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
}
    if model_name:
        convert_weight_and_push(model_name , names_to_config[model_name] , save_directory , push_to_hub )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name , config , save_directory , push_to_hub )
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 58
| 0
|
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list ) -> int:
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b , a = stack.pop() , stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(token ) )

    return stack.pop()
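# Hedged example (added): "2 1 + 3 *" evaluates to (2 + 1) * 3 = 9.
assert evaluate_postfix(["2", "1", "+", "3", "*"]) == 9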
if __name__ == "__main__":
import doctest
doctest.testmod()
| 257
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS )
class ZeroShotObjectDetectionPipeline(ChunkPipeline ):
    """simple docstring"""

    def __init__( self , **kwargs ):
        super().__init__(**kwargs )

        if self.framework == "tf":
            raise ValueError(f'The {self.__class__} is only available in PyTorch.' )

        requires_backends(self , 'vision' )
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING )

    def __call__( self , image , candidate_labels=None , **kwargs , ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop('text_queries' )

        if isinstance(image , (str, Image.Image) ):
            inputs = {'image': image, 'candidate_labels': candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results

    def _sanitize_parameters( self , **kwargs ):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['threshold'] = kwargs['threshold']
        if "top_k" in kwargs:
            postprocess_params['top_k'] = kwargs['top_k']
        return {}, {}, postprocess_params

    def preprocess( self , inputs ):
        image = load_image(inputs['image'] )
        candidate_labels = inputs['candidate_labels']
        if isinstance(candidate_labels , str ):
            candidate_labels = candidate_labels.split(',' )

        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
        for i, candidate_label in enumerate(candidate_labels ):
            text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework )
            image_features = self.image_processor(image , return_tensors=self.framework )
            yield {
                "is_last": i == len(candidate_labels ) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward( self , model_inputs ):
        target_size = model_inputs.pop('target_size' )
        candidate_label = model_inputs.pop('candidate_label' )
        is_last = model_inputs.pop('is_last' )

        outputs = self.model(**model_inputs )

        model_outputs = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
        return model_outputs

    def postprocess( self , model_outputs , threshold=0.1 , top_k=None ):
        results = []
        for model_output in model_outputs:
            label = model_output['candidate_label']
            model_output = BaseModelOutput(model_output )
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output['target_size'] )[0]

            for index in outputs["scores"].nonzero():
                score = outputs['scores'][index].item()
                box = self._get_bounding_box(outputs['boxes'][index][0] )

                result = {'score': score, 'label': label, 'box': box}
                results.append(result )

        results = sorted(results , key=lambda x: x["score"] , reverse=True )
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box( self , box ):
        if self.framework != "pt":
            raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
        return bbox
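# A minimal usage sketch (added; not part of the original module). The checkpoint is a
# public OWL-ViT model and the image URL is illustrative:
#     from transformers import pipeline
#     detector = pipeline(task="zero-shot-object-detection", model="google/owlvit-base-patch32")
#     detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote control"],
#     )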
| 257
| 1
|
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_sentencepiece_available():
    import sentencepiece as sp


FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class Speech2TextTokenizerTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self ):
        """simple docstring"""
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB )
        vocab = ['<s>', '<pad>', '</s>', '<unk>']
        vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(spm_model ) )]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )

        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB , save_dir / VOCAB_FILES_NAMES['spm_file'] )

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self ):
        """simple docstring"""
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(vocab_keys[-1] , 'j' )
        self.assertEqual(len(vocab_keys ) , 1_001 )
    def test_vocab_size(self ):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_001 )
    def test_full_tokenizer(self ):
        """simple docstring"""
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname )

        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [289, 50, 14, 174, 386] , )

        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(ids , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )

        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
    def test_tokenizer_integration(self ):
        """simple docstring"""
lowerCamelCase_ ={'''input_ids''': [[3_791, 797, 31, 11, 64, 797, 31, 2_429, 433, 12, 1_176, 12, 20, 786, 915, 142, 2_413, 240, 37, 3_238, 797, 31, 11, 35, 93, 915, 142, 2_413, 240, 37, 5_540, 567, 1_276, 93, 37, 610, 40, 62, 455, 657, 1_042, 123, 780, 177, 37, 309, 241, 1_298, 514, 20, 292, 2_737, 114, 2_469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3_388, 511, 459, 4, 3_555, 40, 321, 302, 705, 4, 3_388, 511, 583, 326, 5, 5, 5, 62, 3_310, 560, 177, 2_680, 217, 1_508, 32, 31, 853, 418, 64, 583, 511, 1_605, 62, 35, 93, 560, 177, 2_680, 217, 1_508, 1_521, 64, 583, 511, 519, 62, 20, 1_515, 764, 20, 149, 261, 5_625, 7_972, 20, 5_540, 567, 1_276, 93, 3_925, 1_675, 11, 15, 802, 7_972, 576, 217, 1_508, 11, 35, 93, 1_253, 2_441, 15, 289, 652, 31, 416, 321, 3_842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2_681, 1_153, 3_434, 20, 5_540, 37, 567, 126, 1_253, 2_441, 3_376, 449, 210, 431, 1_563, 177, 767, 5_540, 11, 1_203, 472, 11, 2_953, 685, 285, 364, 706, 1_153, 20, 6_799, 20, 2_869, 20, 4_464, 126, 40, 2_429, 20, 1_040, 866, 2_664, 418, 20, 318, 20, 1_726, 186, 20, 265, 522, 35, 93, 2_191, 4_634, 20, 1_040, 12, 6_799, 15, 228, 2_356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_575, 2_666, 684, 1_582, 1_176, 12, 627, 149, 619, 20, 4_902, 563, 11, 20, 149, 261, 3_420, 2_356, 174, 142, 4_714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase_ , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase ):
    checkpoint_name = 'valhalla/s2t_mustc_multilinguial_medium'

    french_text = "C'est trop cool"
    spanish_text = 'Esto es genial'

    @classmethod
    def setUpClass(cls ):
        """simple docstring"""
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name )
        return cls
    def test_lang_code_to_id(self ):
        """simple docstring"""
        self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4 )
        self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6 )
        self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9 )
        self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11 )

    def test_vocab_size(self ):
        """simple docstring"""
        self.assertEqual(self.tokenizer.vocab_size , 10_000 )
    def test_tokenizer_decode_ignores_language_codes(self ):
        """simple docstring"""
        self.assertIn(ES_CODE , self.tokenizer.all_special_ids )
        generated_ids = [ES_CODE, 4, 1_601, 47, 7_647, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_spanish = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_spanish )
        self.assertNotIn(self.tokenizer.eos_token , result )
    def test_tokenizer_adds_special_tokens(self ):
        """simple docstring"""
        self.tokenizer.tgt_lang = 'fr'
        encoded = self.tokenizer(self.french_text ).input_ids
        self.assertEqual(encoded[0] , FR_CODE )
        self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )

    def test_tgt_lang_setter(self ):
        """simple docstring"""
        self.tokenizer.tgt_lang = 'fr'
        self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )

        self.tokenizer.tgt_lang = 'es'
        self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 75
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 191
| 0
|
import requests
APPID = ""  # <-- Put your OpenWeatherMap appid here!
URL_BASE = "https://api.openweathermap.org/data/2.5/"


def current_weather(q: str = "Chicago" , appid: str = APPID ) -> dict:
    return requests.get(URL_BASE + 'weather' , params=locals() ).json()


def weather_forecast(q: str = "Kolkata, India" , appid: str = APPID ) -> dict:
    return requests.get(URL_BASE + 'forecast' , params=locals() ).json()


def weather_onecall(lat: float = 55.68 , lon: float = 12.57 , appid: str = APPID ) -> dict:
    return requests.get(URL_BASE + 'onecall' , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
__UpperCAmelCase = input("Enter a location:").strip()
if location:
pprint(current_weather(location))
else:
break
| 359
|
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]

    result = kruskal(num_nodes , edges )

    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]

    assert sorted(expected ) == sorted(result )
| 257
| 0
|
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tf2_weights_in_bert(model , tf_checkpoint_path , config ):
    tf_path = os.path.abspath(tf_checkpoint_path )
    logger.info(f'Converting TensorFlow checkpoint from {tf_path}' )
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path )
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split('/' )
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f'Skipping non-model layer {full_name}' )
            continue
        if "optimizer" in full_name:
            logger.info(f'Skipping optimization layer {full_name}' )
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith('layer_with_weights' ):
                depth += 1
            else:
                break
        layer_depth.append(depth )
        # read data
        array = tf.train.load_variable(tf_path , full_name )
        names.append('/'.join(name ) )
        arrays.append(array )
    logger.info(f'Read a total of {len(arrays ):,} layers' )

    # Sanity check
    if len(set(layer_depth ) ) != 1:
        raise ValueError(f'Found layer names with different depths (layer depth {list(set(layer_depth ) )})' )
    layer_depth = list(set(layer_depth ) )[0]
    if layer_depth != 1:
        raise ValueError(
            'The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'
            ' heads.' )
# convert layers
logger.info('Converting weights...' )
    for full_name, array in zip(names , arrays ):
        name = full_name.split('/' )
        pointer = model
        trace = []
        for i, m_name in enumerate(name ):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith('layer_with_weights' ):
                layer_num = int(m_name.split('-' )[-1] )
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(['embeddings', 'LayerNorm'] )
                    pointer = getattr(pointer , 'embeddings' )
                    pointer = getattr(pointer , 'LayerNorm' )
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(['encoder', 'layer', str(layer_num - 4 )] )
                    pointer = getattr(pointer , 'encoder' )
                    pointer = getattr(pointer , 'layer' )
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(['pooler', 'dense'] )
                    pointer = getattr(pointer , 'pooler' )
                    pointer = getattr(pointer , 'dense' )
            elif m_name == "embeddings":
                trace.append('embeddings' )
                pointer = getattr(pointer , 'embeddings' )
                if layer_num == 0:
                    trace.append('word_embeddings' )
                    pointer = getattr(pointer , 'word_embeddings' )
                elif layer_num == 1:
                    trace.append('position_embeddings' )
                    pointer = getattr(pointer , 'position_embeddings' )
                elif layer_num == 2:
                    trace.append('token_type_embeddings' )
                    pointer = getattr(pointer , 'token_type_embeddings' )
                else:
                    raise ValueError(f'Unknown embedding layer with name {full_name}' )
                trace.append('weight' )
                pointer = getattr(pointer , 'weight' )
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(['attention', 'self'] )
                pointer = getattr(pointer , 'attention' )
                pointer = getattr(pointer , 'self' )
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(['attention', 'output', 'LayerNorm'] )
                pointer = getattr(pointer , 'attention' )
                pointer = getattr(pointer , 'output' )
                pointer = getattr(pointer , 'LayerNorm' )
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(['attention', 'output', 'dense'] )
                pointer = getattr(pointer , 'attention' )
                pointer = getattr(pointer , 'output' )
                pointer = getattr(pointer , 'dense' )
            elif m_name == "_output_dense":
                # output dense
                trace.extend(['output', 'dense'] )
                pointer = getattr(pointer , 'output' )
                pointer = getattr(pointer , 'dense' )
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(['output', 'LayerNorm'] )
                pointer = getattr(pointer , 'output' )
                pointer = getattr(pointer , 'LayerNorm' )
            elif m_name == "_key_dense":
                # attention key
                trace.append('key' )
                pointer = getattr(pointer , 'key' )
            elif m_name == "_query_dense":
                # attention query
                trace.append('query' )
                pointer = getattr(pointer , 'query' )
            elif m_name == "_value_dense":
                # attention value
                trace.append('value' )
                pointer = getattr(pointer , 'value' )
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(['intermediate', 'dense'] )
                pointer = getattr(pointer , 'intermediate' )
                pointer = getattr(pointer , 'dense' )
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append('bias' )
                pointer = getattr(pointer , 'bias' )
            elif m_name in ["kernel", "gamma"]:
                trace.append('weight' )
                pointer = getattr(pointer , 'weight' )
            else:
                logger.warning(f'Ignored {m_name}' )
        # for certain layers reshape is necessary
        trace = '.'.join(trace )
        if re.match(r'(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)' , trace ) or re.match(
            r'(\S+)\.attention\.output\.dense\.weight' , trace ):
            array = array.reshape(pointer.data.shape )
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array )
        else:
            raise ValueError(
                f'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'
                f' {array.shape}' )
        logger.info(f'Successfully set variable {full_name} to PyTorch layer {trace}' )
    return model
def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path , config_path , pytorch_dump_path ):
    logger.info(f'Loading model based on config from {config_path}...' )
    config = BertConfig.from_json_file(config_path )
    model = BertModel(config )

    # Load weights from checkpoint
    logger.info(f'Loading weights from checkpoint {tf_checkpoint_path}...' )
    load_tf2_weights_in_bert(model , tf_checkpoint_path , config )

    # Save pytorch-model
    logger.info(f'Saving PyTorch model to {pytorch_dump_path}...' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--tf_checkpoint_path''', type=str, required=True, help='''Path to the TensorFlow 2.x checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
type=str,
required=True,
help='''The config json file corresponding to the BERT model. This specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''',
type=str,
required=True,
help='''Path to the output PyTorch model (must include filename).''',
)
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
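    # Example invocation (added; the paths below are placeholders):
    #     python convert_bert_original_tf2_checkpoint_to_pytorch.py \
    #         --tf_checkpoint_path ./tf2_model/bert_model.ckpt \
    #         --bert_config_file ./tf2_model/bert_config.json \
    #         --pytorch_dump_path ./pytorch_model.bin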
| 240
|
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import DecisionTransformerModel
    from transformers.models.decision_transformer.modeling_decision_transformer import (
        DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
    )
class DecisionTransformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        act_dim=6,
        state_dim=17,
        hidden_size=23,
        max_length=11,
        is_training=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.act_dim = act_dim
        self.state_dim = state_dim
        self.hidden_size = hidden_size
        self.max_length = max_length
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        states = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
        actions = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
        rewards = floats_tensor((self.batch_size, self.seq_length, 1))
        returns_to_go = floats_tensor((self.batch_size, self.seq_length, 1))
        timesteps = ids_tensor((self.batch_size, self.seq_length), vocab_size=1000)
        attention_mask = random_attention_mask((self.batch_size, self.seq_length))
        config = self.get_config()

        return (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        )

    def get_config(self):
        return DecisionTransformerConfig(
            batch_size=self.batch_size,
            seq_length=self.seq_length,
            act_dim=self.act_dim,
            state_dim=self.state_dim,
            hidden_size=self.hidden_size,
            max_length=self.max_length,
        )

    def create_and_check_model(
        self,
        config,
        states,
        actions,
        rewards,
        returns_to_go,
        timesteps,
        attention_mask,
    ):
        model = DecisionTransformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(states, actions, rewards, returns_to_go, timesteps, attention_mask)
        self.parent.assertEqual(result.state_preds.shape, states.shape)
        self.parent.assertEqual(result.action_preds.shape, actions.shape)
        self.parent.assertEqual(result.return_preds.shape, returns_to_go.shape)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length * 3, self.hidden_size)
        )  # seq length * 3 as there are 3 modalities: states, returns and actions

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            states,
            actions,
            rewards,
            returns_to_go,
            timesteps,
            attention_mask,
        ) = config_and_inputs
        inputs_dict = {
            "states": states,
            "actions": actions,
            "rewards": rewards,
            "returns_to_go": returns_to_go,
            "timesteps": timesteps,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_torch
class DecisionTransformerModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DecisionTransformerModel,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {"feature-extraction": DecisionTransformerModel} if is_torch_available() else {}

    # Ignoring of a failing test from GenerationTesterMixin, as the model does not use input_ids
    test_generate_without_input_ids = False

    # Ignoring of failing tests from ModelTesterMixin, as the model does not implement these features
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_attention_outputs = False
    test_hidden_states_output = False
    feed_forward_chunking = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    test_gradient_checkpointing = False

    def setUp(self):
        self.model_tester = DecisionTransformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DecisionTransformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DecisionTransformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                "states",
                "actions",
                "rewards",
                "returns_to_go",
                "timesteps",
                "attention_mask",
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@require_torch
class DecisionTransformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_autoregressive_prediction(self):
        """
        An integration test that performs autoregressive prediction of state, action and return
        from a sequence of state, actions and returns. Test is performed over two timesteps.
        """
        NUM_STEPS = 2  # number of steps of autoregressive prediction we will perform
        TARGET_RETURN = 10  # defined by the RL environment, may be normalized
        model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
        model = model.to(torch_device)
        config = model.config
        torch.manual_seed(0)
        state = torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32)  # env.reset()

        expected_outputs = torch.tensor(
            [[0.242793, -0.28693074, 0.8742613], [0.67815274, -0.08101085, -0.12952147]], device=torch_device
        )

        returns_to_go = torch.tensor(TARGET_RETURN, device=torch_device, dtype=torch.float32).reshape(1, 1, 1)
        states = state
        actions = torch.zeros(1, 0, config.act_dim, device=torch_device, dtype=torch.float32)
        rewards = torch.zeros(1, 0, device=torch_device, dtype=torch.float32)
        timesteps = torch.tensor(0, device=torch_device, dtype=torch.long).reshape(1, 1)

        for step in range(NUM_STEPS):
            actions = torch.cat([actions, torch.zeros(1, 1, config.act_dim, device=torch_device)], dim=1)
            rewards = torch.cat([rewards, torch.zeros(1, 1, device=torch_device)], dim=1)

            attention_mask = torch.ones(1, states.shape[1]).to(dtype=torch.long, device=states.device)

            with torch.no_grad():
                state_preds, action_preds, return_preds = model(
                    states=states,
                    actions=actions,
                    rewards=rewards,
                    returns_to_go=returns_to_go,
                    timesteps=timesteps,
                    attention_mask=attention_mask,
                    return_dict=False,
                )

            self.assertEqual(action_preds.shape, actions.shape)
            self.assertTrue(torch.allclose(action_preds[0, -1], expected_outputs[step], atol=1e-4))

            state, reward, done, _ = (  # env.step(action)
                torch.randn(1, 1, config.state_dim).to(device=torch_device, dtype=torch.float32),
                1.0,
                False,
                {},
            )

            action = action_preds[0, -1]
            states = torch.cat([states, state], dim=1)
            pred_return = returns_to_go[0, -1] - reward
            returns_to_go = torch.cat([returns_to_go, pred_return.reshape(1, 1, 1)], dim=1)
            timesteps = torch.cat(
                [timesteps, torch.ones((1, 1), device=torch_device, dtype=torch.long) * (step + 1)], dim=1
            )
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check the substring divisibility property from Project Euler problem 43."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 10) -> int:
    """Return the sum of all 0-9 pandigital numbers with the substring divisibility property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
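# Sanity check: 1406357289 is the worked example from the problem statement,
# so its digit tuple should pass every divisibility test above.
if __name__ == "__main__":
    assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))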
if __name__ == "__main__":
    print(f"{solution() = }")
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """Return the intensity transmitted by an ideal polarizer, by Malus's law."""
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of the allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
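# Quick sanity check (hypothetical values, a sketch rather than part of the
# original module): an ideal polarizer at 60 degrees transmits
# cos^2(60 degrees) = 25% of the incident intensity.
if __name__ == "__main__":
    print(round(malus_law(100.0, 60.0), 2))  # expected: 25.0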
if __name__ == "__main__":
    import doctest

    doctest.testmod(name="malus_law")
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of self-training iterations to run."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
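# Note on expected inputs (inferred from the column operations above, not from
# the original docs): `infer_output` must carry the "prediction" and
# "probability" columns written by the inference pass, while the concatenated
# dataset must still carry the original "label" column, which is dropped before
# the pseudo labels are serialized.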
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-training loop: alternate fine-tuning with pseudo-labeling."""
    # Initialize the accelerator; it handles device placement for us.
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        # Only override attributes that already exist on `args`
        if hasattr(args, key):
            setattr(args, key, value)
    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)
    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)
        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break
    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{best_iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
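# A hypothetical invocation sketch (all paths are placeholders; `finetune` must
# be importable from a sibling finetuning.py, as imported at the top of this
# file):
#
#   selftrain(
#       model_name_or_path="bert-base-uncased",
#       train_file="data/train.csv",
#       infer_file="data/infer.csv",
#       output_dir="output",
#       evaluation_strategy="epoch",
#       eval_file="data/eval.csv",
#   )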
"""simple docstring"""
from __future__ import annotations
class __A :
'''simple docstring'''
def __init__( self : List[Any] ,_snake_case : int ) -> Optional[int]:
"""simple docstring"""
lowercase__ : str = data
lowercase__ : Optional[int] = None
lowercase__ : Optional[int] = None
def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node | None) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right
def main() -> None:  # Main function for testing.
    # Build a small example tree.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.left.right.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)
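# For the tree built in main() above (an expectation tied to that particular
# construction): the first line printed should be False, since nodes 3 and 7
# each have a single child and the tree is therefore not full; the second line
# should be 4, the number of nodes on the longest root-to-leaf path; the rest
# is the in-order traversal of the nine node values.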
if __name__ == "__main__":
    main()
"""simple docstring"""
from __future__ import annotations
lowerCAmelCase_ = 1.6021E-19 # units = C
def carrier_concentration(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    if (conductivity, electron_conc, mobility).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif conductivity < 0:
        raise ValueError("Conductivity cannot be negative")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative")
    elif mobility < 0:
        raise ValueError("Mobility cannot be negative")
    elif conductivity == 0:
        return (
            "conductivity",
            mobility * electron_conc * ELECTRON_CHARGE,
        )
    elif electron_conc == 0:
        return (
            "electron_conc",
            conductivity / (mobility * ELECTRON_CHARGE),
        )
    else:
        return (
            "mobility",
            conductivity / (electron_conc * ELECTRON_CHARGE),
        )
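# Usage sketch (hypothetical numbers): pass exactly one zero and the function
# solves for that quantity from the other two.
if __name__ == "__main__":
    name, value = carrier_concentration(conductivity=1000, electron_conc=0, mobility=1200)
    print(name, value)  # electron_conc, on the order of 5.2e18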
if __name__ == "__main__":
    import doctest

    doctest.testmod()