| code (stringlengths 82–53.2k) | code_codestyle (int64 0–721) | style_context (stringlengths 91–41.9k) | style_context_codestyle (int64 0–699) | label (int64 0–1) |
|---|---|---|---|---|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """
    Heun's second-order method for discrete beta schedules, ported from the k-diffusion implementation
    (Karras et al., 2022).
    """

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        """Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the algorithm."""
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) -> torch.FloatTensor:
        """Constructs the noise schedule of Karras et al. (2022)."""
        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `v_prediction`, or `sample`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
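
# A minimal sketch of how this scheduler is driven in a denoising loop. `unet`
# is a hypothetical noise-prediction model standing in for a real UNet; the
# other calls are the scheduler API defined above. Because Heun's method is
# second order, the timestep schedule visits most sigmas twice (one predictor
# and one corrector model evaluation per sigma).
#
#   scheduler = HeunDiscreteScheduler(beta_start=0.00085, beta_end=0.012)
#   scheduler.set_timesteps(num_inference_steps=25)
#   sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t)  # hypothetical model call
#       sample = scheduler.step(noise_pred, t, sample).prev_sample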
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class MaskGenerationPipeline(ChunkPipeline):
    """
    Automatic mask generation pipeline: predicts binary masks for an image by running the model over batches of
    point prompts, which is why it is implemented as a `ChunkPipeline`.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")

        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs

    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)

    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points

        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )

        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }

    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }

    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))

        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes

        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
import os


def solution() -> int:
    """Returns the total of all the name scores in the file p022_names.txt."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0

    return total_score


if __name__ == "__main__":
    print(solution())
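
# Worked example from the Project Euler 22 statement: "COLIN" is worth
# 3 + 15 + 12 + 9 + 14 = 53 (ord(letter) - 64 maps "A" to 1), and as the
# 938th name in the sorted list it contributes 938 * 53 = 49714 to the total.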
import base64


def base64_encode(string: str) -> bytes:
    # assumption: base64 is the intended target; the same thin wrapper works
    # identically with the b16/b32/b85 variants of the stdlib `base64` module.
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded: bytes) -> str:
    return base64.b64decode(encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)

    decoded = base64_decode(encoded)
    print(decoded)
import importlib.metadata
import warnings
from copy import deepcopy

from packaging import version

from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available


if is_bitsandbytes_available():
    import bitsandbytes as bnb
    import torch
    import torch.nn as nn

    from ..pytorch_utils import Conv1D

if is_accelerate_available():
    from accelerate import init_empty_weights
    from accelerate.utils import find_tied_parameters

logger = logging.get_logger(__name__)


def set_module_quantized_tensor_to_device(module, tensor_name, device, value=None, fp16_statistics=None):
    """
    Sets a tensor (parameter or buffer) of a module on a given device, handling `bitsandbytes`
    8-bit and 4-bit quantized parameters along the way.
    """
    # recurse into submodules for dotted tensor names
    if "." in tensor_name:
        splits = tensor_name.split(".")
        for split in splits[:-1]:
            new_module = getattr(module, split)
            if new_module is None:
                raise ValueError(f"{module} has no attribute {split}.")
            module = new_module
        tensor_name = splits[-1]

    if tensor_name not in module._parameters and tensor_name not in module._buffers:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")
    is_buffer = tensor_name in module._buffers
    old_value = getattr(module, tensor_name)

    if old_value.device == torch.device("meta") and device not in ["meta", torch.device("meta")] and value is None:
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {device}.")

    is_4bit = False
    is_8bit = False
    if is_buffer or not is_bitsandbytes_available():
        is_8bit = False
        is_4bit = False
    else:
        is_4bit = hasattr(bnb.nn, "Params4bit") and isinstance(module._parameters[tensor_name], bnb.nn.Params4bit)
        is_8bit = isinstance(module._parameters[tensor_name], bnb.nn.Int8Params)

    if is_8bit or is_4bit:
        param = module._parameters[tensor_name]
        if param.device.type != "cuda":
            if value is None:
                new_value = old_value.to(device)
            elif isinstance(value, torch.Tensor):
                new_value = value.to("cpu")
                if value.dtype == torch.int8:
                    is_8bit_serializable = version.parse(importlib.metadata.version("bitsandbytes")) > version.parse(
                        "0.37.2"
                    )
                    if not is_8bit_serializable:
                        raise ValueError(
                            "Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. "
                            "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
                        )
            else:
                new_value = torch.tensor(value, device="cpu")

            # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
            # Since weights are saved in the correct "orientation", we skip transposing when loading.
            if issubclass(module.source_cls, Conv1D) and fp16_statistics is None:
                new_value = new_value.T

            kwargs = old_value.__dict__
            if is_8bit:
                new_value = bnb.nn.Int8Params(new_value, requires_grad=False, **kwargs).to(device)
            elif is_4bit:
                new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(device)

            module._parameters[tensor_name] = new_value
            if fp16_statistics is not None:
                setattr(module.weight, "SCB", fp16_statistics.to(device))
    else:
        if value is None:
            new_value = old_value.to(device)
        elif isinstance(value, torch.Tensor):
            new_value = value.to(device)
        else:
            new_value = torch.tensor(value, device=device)

        if is_buffer:
            module._buffers[tensor_name] = new_value
        else:
            new_value = nn.Parameter(new_value, requires_grad=old_value.requires_grad)
            module._parameters[tensor_name] = new_value


def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced


def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model


def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,
    )
    return replace_with_bnb_linear(*args, **kwargs)


def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)


def get_keys_to_not_convert(model):
    # Create a copy of the model and tie the weights, then check if it contains tied weights
    tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" from the keys
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
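
# A minimal sketch of how these helpers are typically exercised from user code
# (assumes a CUDA machine with `bitsandbytes` installed; the quantization
# config below drives `replace_with_bnb_linear` internally during loading):
#
#   from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#   model = AutoModelForCausalLM.from_pretrained(
#       "facebook/opt-350m",
#       quantization_config=BitsAndBytesConfig(load_in_8bit=True),
#       device_map="auto",
#   )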
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
},
"tokenizer_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"albert-base-v1": 512,
"albert-large-v1": 512,
"albert-xlarge-v1": 512,
"albert-xxlarge-v1": 512,
"albert-base-v2": 512,
"albert-large-v2": 512,
"albert-xlarge-v2": 512,
"albert-xxlarge-v2": 512,
}
_lowercase : Dict = "▁"
class _UpperCamelCase ( __snake_case ):
"""simple docstring"""
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase = AlbertTokenizer
def __init__( self , a__=None , a__=None , a__=True , a__=True , a__=False , a__="[CLS]" , a__="[SEP]" , a__="<unk>" , a__="[SEP]" , a__="<pad>" , a__="[CLS]" , a__="[MASK]" , **a__ , ) -> Dict:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
A = (
AddedToken(a__ , lstrip=a__ , rstrip=a__ , normalized=a__ )
if isinstance(a__ , a__ )
else mask_token
)
super().__init__(
a__ , tokenizer_file=a__ , do_lower_case=a__ , remove_space=a__ , keep_accents=a__ , bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , **a__ , )
A = do_lower_case
A = remove_space
A = keep_accents
A = vocab_file
A = False if not self.vocab_file else True
def _UpperCAmelCase ( self , a__ , a__ = None ) -> List[int]:
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _UpperCAmelCase ( self , a__ , a__ = None ) -> List[int]:
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase ( self , a__ , a__ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(a__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
a__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ):
copyfile(self.vocab_file , a__ )
return (out_vocab_file,)
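
# A minimal usage sketch ("albert-base-v2" is one of the checkpoints mapped in
# PRETRAINED_VOCAB_FILES_MAP above; the files are fetched from the hub):
#
#   tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#   tokenizer("Hello world")["input_ids"]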
MOD_ADLER = 65_521


def adler32(plain_text: str) -> int:
    """Calculates the Adler-32 checksum of the given string (as defined in RFC 1950)."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
def __lowerCamelCase ( _lowercase ) -> int:
assert (
isinstance(_lowercase , _lowercase ) and number_of_steps > 0
), F'number_of_steps needs to be positive integer, your input {number_of_steps}'
if number_of_steps == 1:
return 1
UpperCamelCase , UpperCamelCase = 1, 1
for _ in range(number_of_steps - 1 ):
UpperCamelCase , UpperCamelCase = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
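
# The recurrence is the Fibonacci one: ways(n) = ways(n - 1) + ways(n - 2),
# because the final move is either a single step or a double step. Small
# hand-checked cases: climb_stairs(3) == 3 (1+1+1, 1+2, 2+1), climb_stairs(5) == 8.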
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig(PretrainedConfig):
    """
    Configuration class to store the configuration of a RoBERTa model; the defaults match the
    roberta-base architecture.
    """

    model_type = "roberta"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class RobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
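
# A minimal usage sketch (RobertaModel lives in the sibling modeling module;
# shown with the top-level transformers import for brevity):
#
#   from transformers import RobertaConfig, RobertaModel
#
#   configuration = RobertaConfig()      # defaults match roberta-base
#   model = RobertaModel(configuration)  # randomly initialized weights
#   configuration = model.config         # read the config back from the model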
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self :Dict ):
__lowerCamelCase : Union[str, Any] =inspect.getfile(accelerate.test_utils )
__lowerCamelCase : Union[str, Any] =os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''external_deps''', '''test_metrics.py'''] )
from accelerate.test_utils.scripts.external_deps import test_metrics # noqa: F401
__lowerCamelCase : List[str] =test_metrics
@require_cpu
def __lowercase ( self :Optional[Any] ):
debug_launcher(self.test_metrics.main , num_processes=1 )
@require_cpu
def __lowercase ( self :int ):
debug_launcher(self.test_metrics.main )
@require_single_gpu
def __lowercase ( self :int ):
self.test_metrics.main()
@require_multi_gpu
def __lowercase ( self :Dict ):
print(f'Found {torch.cuda.device_count()} devices.' )
__lowerCamelCase : Union[str, Any] =['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__lowercase , env=os.environ.copy() )
from __future__ import annotations

from typing import Any


class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        """
        Arguments:
            num_of_nodes - the number of nodes in the graph
        """
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first, second, edge weight] to the graph."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the component index (root) of a given node."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Propagates a new component index throughout the component of the given node."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Merges the components of two nodes, attaching the smaller component to the larger one."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)

        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def find_minimum_spanning_tree(self) -> None:
        """Performs Borůvka's algorithm to find the minimum spanning tree."""
        component_size = []
        mst_weight = 0

        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            # find the cheapest edge leaving each component
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            # add those edges to the tree and merge the components they connect
            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n")
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}")


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
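
# A small hand-checked example: on a 4-node cycle with edge weights
# (0-1: 1), (1-2: 2), (2-3: 3), (3-0: 4), each round keeps the cheapest edge
# leaving every component, so the tree found is 0-1, 1-2, 2-3 with a total
# weight of 1 + 2 + 3 = 6.
#
#   g = Graph(4)
#   for u, v, w in ((0, 1, 1), (1, 2, 2), (2, 3, 3), (3, 0, 4)):
#       g.add_edge(u, v, w)
#   g.find_minimum_spanning_tree()  # prints the edges and a total weight of 6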
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline


parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex, tracing the UNet with a dummy input (latents, timestep, text embeddings)
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_states = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_states)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}

if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
__lowerCAmelCase : List[str] =True
except (ImportError, ModuleNotFoundError):
__lowerCAmelCase : int =False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def UpperCAmelCase__ ( lowerCAmelCase__ :str ) -> Any:
'''simple docstring'''
re.sub("""<n>""" , """""" , lowerCAmelCase__ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(lowerCAmelCase__ ) )
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(
    remaining_length: int, remainder: int, digits: list[int], length: int
) -> int:
    """
    Count the reversible numbers of the given length, iterating over possible
    digits while tracking the carry (`remainder`) of n + reverse(n).
    """
    if remaining_length == 0:
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0
            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Counts the reversible numbers for every length from 1 to `max_power`."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
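
# Sanity check from the Project Euler 145 statement: there are exactly 120
# reversible numbers below one thousand, so solution(3) should return 120.
# The parity bookkeeping above works because every digit of n + reverse(n)
# must be odd, and a digit pair (digit1, digit2) feeds two columns at once.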
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _UpperCamelCase ( self : int ):
'''simple docstring'''
A__ : Optional[int] = [[1, 2, 4], [1, 2, 3, 4]]
A__ : Tuple = DisjunctiveConstraint(UpperCamelCase_ )
self.assertTrue(isinstance(dc.token_ids , UpperCamelCase_ ) )
with self.assertRaises(UpperCamelCase_ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(UpperCamelCase_ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : Union[str, Any] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(UpperCamelCase_ ):
DisjunctiveConstraint(UpperCamelCase_ ) # fails here
def _UpperCamelCase ( self : List[Any] ):
'''simple docstring'''
A__ : List[Any] = [[1, 2, 3], [1, 2, 4]]
A__ : Optional[int] = DisjunctiveConstraint(UpperCamelCase_ )
A__ : Any = dc.update(1 )
A__ : Dict = stepped is True and completed is False and reset is False
self.assertTrue(UpperCamelCase_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
A__ : Dict = dc.update(2 )
A__ : int = stepped is True and completed is False and reset is False
self.assertTrue(UpperCamelCase_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
A__ : Optional[int] = dc.update(3 )
A__ : str = stepped is True and completed is True and reset is False
self.assertTrue(UpperCamelCase_ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def _UpperCamelCase ( self : str ):
'''simple docstring'''
A__ : str = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
A__ : Union[str, Any] = DisjunctiveConstraint(UpperCamelCase_ )
A__ : Dict = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
A__ : List[str] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
A__ : int = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
A__ : List[Any] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
A__ : Dict = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
A__ : Optional[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
A__ : Any = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
A_ = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]):
            load_adapter(name, value, adapter, unused_weights)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def load_adapter(full_name, value, adapter, unused_weights):
    name = full_name.split("adaptor.")[-1]
    items = name.split(".")

    if items[1].isdigit():
        layer_id = int(items[1])
    else:
        layer_id = None

    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(f"Adapter proj layer bias was initialized from {full_name}.")
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(f"Adapter proj layer weight was initialized from {full_name}.")
    elif isinstance(layer_id, int):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.")
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(f"Adapter layer {layer_id} weight was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    config_yaml_path,
    encoder_config_path,
    decoder_config_path,
    add_adapter,
    adapter_kernel_size,
    adapter_stride,
    decoder_start_token_id,
    encoder_output_dim,
):
    """Copy/paste/tweak the fairseq model's weights to the transformers design."""
    # load configs
    encoder_config = Wav2Vec2Config.from_pretrained(
        encoder_config_path,
        add_adapter=add_adapter,
        adapter_stride=adapter_stride,
        adapter_kernel_size=adapter_kernel_size,
        use_auth_token=True,
        output_hidden_size=encoder_output_dim,
    )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path)

    # load model
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path],
        arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/")[:-1]),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        },
    )
    model = model[0].eval()

    # load feature extractor
    feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True)

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)

    recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    tokenizer = MBart50Tokenizer(dict_path)
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"

    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_yaml_path''', default=None, type=str, help='''Path to yaml file of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-xls-r-1b''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/mbart-large-50-one-to-many-mmt''',
type=str,
help='''Path to hf decoder checkpoint config''',
)
parser.add_argument('''--add_adapter''', default=True, type=bool, help='''whether to add model adapter layers''')
parser.add_argument('''--adapter_stride''', default=2, type=int, help='''stride of adapter layers''')
parser.add_argument('''--adapter_kernel_size''', default=3, type=int, help='''kernel size of adapter layers''')
parser.add_argument('''--encoder_output_dim''', default=1024, type=int, help='''encoder output dim''')
parser.add_argument('''--start_token_id''', default=25_0004, type=int, help='''`decoder_start_token_id` of model config''')
args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
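# Example invocation (the script name and every path below are placeholders,
# not files that ship with this code):
#   python convert_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/mbart50/dict.txt \
#       --config_yaml_path /path/to/config.yaml \
#       --pytorch_dump_folder_path ./wav2vec2-mbart50-converted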
| 498
| 0
|
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n    Examples:\n    ```py\n    >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n    >>> import torch\n\n    >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")\n    >>> pipe_prior.to(\"cuda\")\n\n    >>> prompt = \"red cat, 4k photo\"\n    >>> out = pipe_prior(prompt)\n    >>> image_emb = out.image_embeds\n    >>> negative_image_emb = out.negative_image_embeds\n\n    >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")\n    >>> pipe.to(\"cuda\")\n\n    >>> image = pipe(\n    ...     prompt,\n    ...     image_embeds=image_emb,\n    ...     negative_image_embeds=negative_image_emb,\n    ...     height=768,\n    ...     width=768,\n    ...     num_inference_steps=100,\n    ... ).images\n\n    >>> image[0].save(\"cat.png\")\n    ```\n"
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
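# Illustrative behaviour of get_new_h_w with the default scale_factor=8
# (values computed by hand; h and w are rounded up to a multiple of 64):
#   get_new_h_w(512, 512) -> (64, 64)
#   get_new_h_w(768, 768) -> (96, 96)
#   get_new_h_w(700, 500) -> (88, 64)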
class KandinskyPipeline(DiffusionPipeline):
    def __init__(self, text_encoder, tokenizer, unet, scheduler, movq):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, movq=movq
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}')
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding='max_length', truncation=True, max_length=77,
            return_attention_mask=True, add_special_tokens=True, return_tensors='pt',
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding='longest', return_tensors='pt').input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f' {self.tokenizer.model_max_length} tokens: {removed_text}')
        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)
        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask)
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [''] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f'`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !='
                    f' {type(prompt)}.')
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f'`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:'
                    f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
                    ' the batch size of `prompt`.')
            else:
                uncond_tokens = negative_prompt
            uncond_input = self.tokenizer(
                uncond_tokens, padding='max_length', max_length=77, truncation=True,
                return_attention_mask=True, add_special_tokens=True, return_tensors='pt',
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)
            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask)
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1)
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])
        return prompt_embeds, text_encoder_hidden_states, text_mask
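    # Layout note (added for clarity): under classifier-free guidance the three
    # tensors returned above have a leading dimension of
    # 2 * batch_size * num_images_per_prompt, with the unconditional half first;
    # __call__ mirrors this by duplicating the latents before each UNet pass.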
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`')
        device = torch.device(f'cuda:{gpu_id}')
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version('>=', '0.17.0.dev0'):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.')
        device = torch.device(f'cuda:{gpu_id}')
        if self.device.type != "cpu":
            self.to('cpu', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, '_hf_hook'):
return self.device
for module in self.unet.modules():
if (
                hasattr(module, '_hf_hook')
and hasattr(module._hf_hook ,'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt,
        image_embeds,
        negative_image_embeds,
        negative_prompt=None,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(prompt)}')
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt)
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device)
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height, width = get_new_h_w(height, width, self.movq_scale_factor)
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype, device, generator, latents, self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input, timestep=t, encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs, return_dict=False,
            )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)
            if not (
                hasattr(self.scheduler.config, 'variance_type')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, generator=generator).prev_sample
        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}')
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
| 193
|
from typing import Dict
from .base import GenericTensor, Pipeline
class FeatureExtractionPipeline(Pipeline):
    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)')
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs
        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
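# Illustrative usage through the pipeline factory (the checkpoint name below is
# only an example, not something this module depends on):
#   from transformers import pipeline
#   extractor = pipeline("feature-extraction", model="distilbert-base-cased")
#   features = extractor("This is a test")  # nested lists: [batch, seq_len, hidden]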
| 193
| 1
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False, metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999_995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args, training_args):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file", metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWavaVecaPretraining:
    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None
    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features, max_length=self.max_length, padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]
        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long)
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device)
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob,
            self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2,
        )
        return batch
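    # Worked example of the flip/cumsum/flip trick above (illustrative): for a
    # sample with 2 valid output frames in a batch padded to length 5, a 1 is
    # first written at index 1, then
    #   [0, 1, 0, 0, 0] -flip-> [0, 0, 0, 1, 0] -cumsum-> [0, 0, 0, 1, 1] -flip-> [1, 1, 0, 0, 0]
    # i.e. exactly the first 2 positions are attended to.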
class WavaVecaPreTrainer(Trainer):
    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp))
        return loss.detach()
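    # Temperature schedule sketch (illustrative, using the ModelArguments
    # defaults): the gumbel temperature follows
    #   max(max_gumbel_temp * gumbel_temp_decay ** num_update_step, min_gumbel_temp)
    # so with max=2.0, decay=0.999995 and min=0.5 it reaches the floor after
    # roughly 277k updates (2.0 * 0.999995**t = 0.5 at t ≈ 277_000).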
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize, batched=True, num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = WavaVecaForPreTraining(config)
    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)
    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
| 206
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/data2vec-text-base': 'https://huggingface.co/data2vec/resolve/main/config.json',
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 206
| 1
|
"""simple docstring"""
from math import pow, sqrt
def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError('Input Error: Molar mass values must be greater than 0.')
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError('Input Error: Molar mass and effusion rate values must be greater than 0.')
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError('Input Error: Molar mass and effusion rate values must be greater than 0.')
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError('Input Error: Molar mass and effusion rate values must be greater than 0.')
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError('Input Error: Molar mass and effusion rate values must be greater than 0.')
    )
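# Worked example (Graham's law, illustrative): for hydrogen (M ≈ 2.016 g/mol)
# and oxygen (M ≈ 32.00 g/mol),
#   effusion_ratio(2.016, 32.00) == round(sqrt(32.00 / 2.016), 6) ≈ 3.984095
# i.e. H2 effuses roughly four times as fast as O2.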
| 552
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
    'umberto-commoncrawl-cased-v1': (
        'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
    ),
    'umberto-wikipedia-uncased-v1': (
        'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2,
                 initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2,
                 position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
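# Illustrative note (not from the original file): CamembertConfig() with no
# arguments yields the defaults above (12 layers, 12 attention heads, hidden
# size 768); hosted checkpoints override these through their own config.json,
# e.g. with a different vocab_size.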
| 555
| 0
|
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. If passed, sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to the maximum sentence length. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
                "efficient on GPU but very bad for TPU."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        if self.train_file is not None:
            extension = self.train_file.split('.')[-1]
            assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
        if self.validation_file is not None:
            extension = self.validation_file.split('.')[-1]
            assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]['input_ids'])
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened_features = list(chain(*flattened_features))

        batch = self.tokenizer.pad(
            flattened_features, padding=self.padding, max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt',
        )
        # Un-flatten
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        # Add back labels
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch
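# Shape walk-through for the collator above (illustrative): with batch_size=2
# and num_choices=4, eight flattened sequences are padded together; the
# view(batch_size, num_choices, -1) then restores the [batch, choice, seq_len]
# layout expected by AutoModelForMultipleChoice, and "labels" is re-attached
# as a tensor of shape [batch].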
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_swag', model_args, data_args)
    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S',
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f' distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}')
    logger.info(f'Training/evaluation parameters {training_args}')
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                'Use --overwrite_output_dir to overcome.')
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.train_file is not None or data_args.validation_file is not None:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = data_args.train_file.split('.')[-1]
        raw_datasets = load_dataset(
            extension, data_files=data_files, cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        # Downloading and loading the swag dataset from the hub.
        raw_datasets = load_dataset(
            'swag', 'regular', cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir, revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path),
        config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # When using your own dataset or a different dataset from swag, you will probably need to change this.
    ending_names = [f'ending{i}' for i in range(4)]
    context_name = 'sent1'
    question_header_name = 'sent2'
    if data_args.max_seq_length is None:
        max_seq_length = tokenizer.model_max_length
        if max_seq_length > 1024:
            logger.warning(
                'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'
                ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'
                ' override this default with `--block_size xxx`.')
            max_seq_length = 1024
    else:
        if data_args.max_seq_length > tokenizer.model_max_length:
            logger.warning(
                f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
                f' model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.')
        max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    # Preprocessing the datasets.
    def preprocess_function(examples):
        first_sentences = [[context] * 4 for context in examples[context_name]]
        question_headers = examples[question_header_name]
        second_sentences = [
            [f'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(question_headers)
        ]
        # Flatten out
        first_sentences = list(chain(*first_sentences))
        second_sentences = list(chain(*second_sentences))
        # Tokenize
        tokenized_examples = tokenizer(
            first_sentences, second_sentences, truncation=True, max_length=max_seq_length,
            padding='max_length' if data_args.pad_to_max_length else False,
        )
        # Un-flatten
        return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
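    # Illustrative: each SWAG example expands to 4 (sent1, "sent2 ending_i")
    # pairs before tokenization; the v[i : i + 4] slicing above regroups every
    # four consecutive encoded sequences back into one multiple-choice feature.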
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError('--do_train requires a train dataset')
        train_dataset = raw_datasets['train']
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc='train dataset map pre-processing'):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    if training_args.do_eval:
        if "validation" not in raw_datasets:
            raise ValueError('--do_eval requires a validation dataset')
        eval_dataset = raw_datasets['validation']
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc='validation dataset map pre-processing'):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
            )

    # Data collator
    data_collator = (
        default_data_collator
        if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    )
    # Metric
    def compute_metrics(eval_predictions):
        predictions, label_ids = eval_predictions
        preds = np.argmax(predictions, axis=1)
        return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics,
    )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate()
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)

    kwargs = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'multiple-choice',
        'dataset_tags': 'swag',
        'dataset_args': 'regular',
        'dataset': 'SWAG',
        'language': 'en',
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 427
|
'''simple docstring'''
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
GPTaTokenizer,
GPTaTokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
@slow
def lowercase__ ( self : str ):
for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , (BertTokenizer, BertTokenizerFast) )
self.assertGreater(len(__lowerCAmelCase ) , 0 )
for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , (GPTaTokenizer, GPTaTokenizerFast) )
self.assertGreater(len(__lowerCAmelCase ) , 0 )
def lowercase__ ( self : Optional[int] ):
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowercase__ ( self : Tuple ):
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , (RobertaTokenizer, RobertaTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 2_0 )
def lowercase__ ( self : Any ):
__snake_case = AutoConfig.from_pretrained(__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
# Check that tokenizer_type ≠ model_type
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , config=__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(tokenizer.vocab_size , 1_2 )
def lowercase__ ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(__lowerCAmelCase , 'vocab.txt' ) )
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , tokenizer_type='bert' , use_fast=__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(__lowerCAmelCase , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(__lowerCAmelCase , 'merges.txt' ) )
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , tokenizer_type='gpt2' , use_fast=__lowerCAmelCase )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
@require_tokenizers
def lowercase__ ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.txt' , os.path.join(__lowerCAmelCase , 'vocab.txt' ) )
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , tokenizer_type='bert' )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
shutil.copy('./tests/fixtures/vocab.json' , os.path.join(__lowerCAmelCase , 'vocab.json' ) )
shutil.copy('./tests/fixtures/merges.txt' , os.path.join(__lowerCAmelCase , 'merges.txt' ) )
__snake_case = AutoTokenizer.from_pretrained(__lowerCAmelCase , tokenizer_type='gpt2' )
self.assertIsInstance(__lowerCAmelCase , __lowerCAmelCase )
def lowercase__ ( self : int ):
with pytest.raises(__lowerCAmelCase ):
AutoTokenizer.from_pretrained('./' , tokenizer_type='xxx' )
@require_tokenizers
def lowercase__ ( self : Union[str, Any] ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
__snake_case = tokenizer_class.from_pretrained('wietsedv/bert-base-dutch-cased' )
self.assertIsInstance(__lowerCAmelCase , (BertTokenizer, BertTokenizerFast) )
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
self.assertEqual(tokenizer.basic_tokenizer.do_lower_case , __lowerCAmelCase )
else:
self.assertEqual(tokenizer.do_lower_case , __lowerCAmelCase )
self.assertEqual(tokenizer.model_max_length , 5_1_2 )
@require_tokenizers
def lowercase__ ( self : str ):
for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
with self.assertRaisesRegex(
__lowerCAmelCase , 'julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier' , ):
__snake_case = tokenizer_class.from_pretrained('julien-c/herlolip-not-exists' )
def lowercase__ ( self : Any ):
# tests: https://github.com/huggingface/transformers/pull/13251
# 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
# 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
__snake_case = TOKENIZER_MAPPING.values()
__snake_case = []
for slow_tok, fast_tok in tokenizers:
if slow_tok is not None:
tokenizer_names.append(slow_tok.__name__ )
if fast_tok is not None:
tokenizer_names.append(fast_tok.__name__ )
for tokenizer_name in tokenizer_names:
# must find the right class
tokenizer_class_from_name(__lowerCAmelCase )
@require_tokenizers
def lowercase__ ( self : List[str] ):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)

    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")

    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)
        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)

    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")

    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast because there is no converter from slow to fast for our
            # new tokenizer and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test the tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test the tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")

    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
| 427
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
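# Note (added for clarity, not part of the original file): with _LazyModule installed in
# sys.modules, importing this package is cheap. The torch-dependent modeling classes are
# only imported the first time an attribute such as MMBTModel is actually accessed, e.g.
#
#     from <this package> import MMBTConfig   # triggers the lazy submodule load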
| 37
|
'''simple docstring'''
from math import pi
def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
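# Worked check (illustrative): a 90 degree arc of a circle with radius 10 spans a
# quarter of the circumference, 2 * pi * 10 / 4 = 5 * pi, i.e. about 15.70796 --
# which is the value printed by arc_length(90, 10) above.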
| 523
| 0
|
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, num_inference_steps=2, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, generator=generator, num_inference_steps=2, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, num_inference_steps=2, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, image=image, mask_image=mask_image, original_image=original_image, generator=generator, num_inference_steps=2, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 12
|
"""simple docstring"""
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value
def generate_roman_numerals(num: int) -> str:
    numerals = ""

    m_count = num // 1000
numerals += m_count * "M"
num %= 1000
    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
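# Example (illustrative): "XIIII" is a valid but non-minimal numeral for 14;
# parse_roman_numerals("XIIII") returns 14 and generate_roman_numerals(14)
# returns the minimal form "XIV", a saving of two characters.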
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings
if __name__ == "__main__":
print(f'{solution() = }')
| 12
| 1
|
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8  # flag to identify an arrow key

KEYMAP = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    "Gets raw characters from inputs"
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    "Gets a character from the keyboard and returns the key code"
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
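# Minimal usage sketch (assumes an interactive terminal; not part of the original module):
#
#     if __name__ == "__main__":
#         print("Press a key (arrows are reported as single key codes)...")
#         key = get_character()
#         if isinstance(key, str):
#             print(f"got printable key: {key!r}")
#         else:
#             print(f"got special key code: {key}")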
| 182
|
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_regex = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_regex, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        encoder_decoder_regex = r"(encoder|decoder)\/"
        if re.match(encoder_decoder_regex, key):
            groups = re.match(encoder_decoder_regex, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'expert_{idx}/')}")

            s_dict.pop(key)

    return s_dict
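# Illustrative walkthrough of rename_keys (hypothetical checkpoint key):
#   "encoder/layers_0/attention/query/kernel"
#   -> "encoder/block/0/layer/attention/query/kernel"   (layers_(\d+) regex)
#   -> "encoder/block/0/layer/0/SelfAttention/q/kernel" (MOE_LAYER_NAME_MAPPING above)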
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google-style gin config to the Hugging Face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    # Initialise PyTorch model
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
    args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 11
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm'] = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xglm_fast'] = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xglm'] = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_xglm'] = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xglm'] = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 718
|
'''simple docstring'''
import argparse
import struct
import unittest
class SHA256:
    """
    Class to contain the entire pipeline for the SHA-256 hashing algorithm
    """

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initialize hash values
        self.hashes = [
0x6a_09_e6_67,
0xbb_67_ae_85,
0x3c_6e_f3_72,
0xa5_4f_f5_3a,
0x51_0e_52_7f,
0x9b_05_68_8c,
0x1f_83_d9_ab,
0x5b_e0_cd_19,
]
        # Initialize round constants
        self.round_constants = [
0x42_8a_2f_98,
0x71_37_44_91,
0xb5_c0_fb_cf,
0xe9_b5_db_a5,
0x39_56_c2_5b,
0x59_f1_11_f1,
0x92_3f_82_a4,
0xab_1c_5e_d5,
0xd8_07_aa_98,
0x12_83_5b_01,
0x24_31_85_be,
0x55_0c_7d_c3,
0x72_be_5d_74,
0x80_de_b1_fe,
0x9b_dc_06_a7,
0xc1_9b_f1_74,
0xe4_9b_69_c1,
0xef_be_47_86,
0x0f_c1_9d_c6,
0x24_0c_a1_cc,
0x2d_e9_2c_6f,
0x4a_74_84_aa,
0x5c_b0_a9_dc,
0x76_f9_88_da,
0x98_3e_51_52,
0xa8_31_c6_6d,
0xb0_03_27_c8,
0xbf_59_7f_c7,
0xc6_e0_0b_f3,
0xd5_a7_91_47,
0x06_ca_63_51,
0x14_29_29_67,
0x27_b7_0a_85,
0x2e_1b_21_38,
0x4d_2c_6d_fc,
0x53_38_0d_13,
0x65_0a_73_54,
0x76_6a_0a_bb,
0x81_c2_c9_2e,
0x92_72_2c_85,
0xa2_bf_e8_a1,
0xa8_1a_66_4b,
0xc2_4b_8b_70,
0xc7_6c_51_a3,
0xd1_92_e8_19,
0xd6_99_06_24,
0xf4_0e_35_85,
0x10_6a_a0_70,
0x19_a4_c1_16,
0x1e_37_6c_08,
0x27_48_77_4c,
0x34_b0_bc_b5,
0x39_1c_0c_b3,
0x4e_d8_aa_4a,
0x5b_9c_ca_4f,
0x68_2e_6f_f3,
0x74_8f_82_ee,
0x78_a5_63_6f,
0x84_c8_78_14,
0x8c_c7_02_08,
0x90_be_ff_fa,
0xa4_50_6c_eb,
0xbe_f9_a3_f7,
0xc6_71_78_f2,
]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (h + s1 + ch + self.round_constants[index] + words[index]) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                a, b, c, d, e, f, g, h = (
                    (temp1 + temp2) % 0x100000000,
                    a,
                    b,
                    c,
                    (d + temp1) % 0x100000000,
                    e,
                    f,
                    g,
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
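# Reference check (well-known FIPS 180-2 test vector, stated for illustration):
#   SHA256(b"abc").hash should equal
#   "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"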
class SHA256HashTest(unittest.TestCase):
    """
    Test class for the SHA256 class, inheriting unittest.TestCase
    """

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)
if __name__ == "__main__":
main()
| 9
| 0
|
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
"c_attn": "att_proj",
"c_proj": "out_proj",
"c_fc": "in_proj",
"transformer.": "",
"h.": "layers.",
"ln_1": "layernorm_1",
"ln_2": "layernorm_2",
"ln_f": "layernorm_final",
"wpe": "position_embeds_layer",
"wte": "input_embeds_layer",
}
REMOTE_MODEL_PATHS = {
"text_small": {
"repo_id": "suno/bark",
"file_name": "text.pt",
},
"coarse_small": {
"repo_id": "suno/bark",
"file_name": "coarse.pt",
},
"fine_small": {
"repo_id": "suno/bark",
"file_name": "fine.pt",
},
"text": {
"repo_id": "suno/bark",
"file_name": "text_2.pt",
},
"coarse": {
"repo_id": "suno/bark",
"file_name": "coarse_2.pt",
},
"fine": {
"repo_id": "suno/bark",
"file_name": "fine_2.pt",
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache")
CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0")
def _get_ckpt_path(model_type, use_small=False):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"])


def _download(from_hf_path, file_name):
    os.makedirs(CACHE_DIR, exist_ok=True)
    hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR)
def _load_model(ckpt_path, device, use_small=False, model_type="text"):
    if model_type == "text":
        ModelClass = BarkSemanticModel
        ConfigClass = BarkSemanticConfig
        GenerationConfigClass = BarkSemanticGenerationConfig
    elif model_type == "coarse":
        ModelClass = BarkCoarseModel
        ConfigClass = BarkCoarseConfig
        GenerationConfigClass = BarkCoarseGenerationConfig
    elif model_type == "fine":
        ModelClass = BarkFineModel
        ConfigClass = BarkFineConfig
        GenerationConfigClass = BarkFineGenerationConfig
    else:
        raise NotImplementedError()

    model_key = f"{model_type}_small" if use_small else model_type
    model_info = REMOTE_MODEL_PATHS[model_key]
    if not os.path.exists(ckpt_path):
        logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.")
        _download(model_info["repo_id"], model_info["file_name"])
    checkpoint = torch.load(ckpt_path, map_location=device)
    # this is a hack
    model_args = checkpoint["model_args"]
    if "input_vocab_size" not in model_args:
        model_args["input_vocab_size"] = model_args["vocab_size"]
        model_args["output_vocab_size"] = model_args["vocab_size"]
        del model_args["vocab_size"]

    # convert Bark model arguments to HF Bark model arguments
    model_args["num_heads"] = model_args.pop("n_head")
    model_args["hidden_size"] = model_args.pop("n_embd")
    model_args["num_layers"] = model_args.pop("n_layer")

    model_config = ConfigClass(**checkpoint["model_args"])
    model = ModelClass(config=model_config)
    model_generation_config = GenerationConfigClass()

    model.generation_config = model_generation_config
    state_dict = checkpoint["model"]
    # fixup checkpoint
    unwanted_prefix = "_orig_mod."
    for k, v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            # replace part of the key with corresponding layer name in HF implementation
            new_k = k[len(unwanted_prefix) :]
            for old_layer_name in new_layer_name_dict:
                new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name])

            state_dict[new_k] = state_dict.pop(k)

    extra_keys = set(state_dict.keys()) - set(model.state_dict().keys())
    extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")}
    missing_keys = set(model.state_dict().keys()) - set(state_dict.keys())
    missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")}
    if len(extra_keys) != 0:
        raise ValueError(f"extra keys found: {extra_keys}")
    if len(missing_keys) != 0:
        raise ValueError(f"missing keys: {missing_keys}")
    model.load_state_dict(state_dict, strict=False)
    n_params = model.num_parameters(exclude_embeddings=True)
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"model loaded: {round(n_params / 1e6, 1)}M params, {round(val_loss, 3)} loss")
    model.eval()
    model.to(device)
    del checkpoint, state_dict

    return model
def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"):
    if model_type not in ("text", "coarse", "fine"):
        raise NotImplementedError()

    device = "cpu"  # do conversion on cpu

    ckpt_path = _get_ckpt_path(model_type, use_small=use_small)
    model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small)

    # load bark initial model
    bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small)

    if model_type == "text":
        bark_model = bark_model["model"]

    if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params():
        raise ValueError("initial and new models don't have the same number of parameters")

    # check if same output as the bark model
    batch_size = 5
    sequence_length = 10

    if model_type in ["text", "coarse"]:
        vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int)
        output_old_model = bark_model(vec)[0]

        output_new_model_total = model(vec)

        # take last logits
        output_new_model = output_new_model_total.logits[:, [-1], :]
    else:
        prediction_codebook_channel = 3
        n_codes_total = 8
        vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int)

        output_new_model_total = model(prediction_codebook_channel, vec)
        output_old_model = bark_model(prediction_codebook_channel, vec)

        output_new_model = output_new_model_total.logits

    # output difference should come from the difference of self-attention implementation design
    if output_new_model.shape != output_old_model.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    if (output_new_model - output_old_model).abs().max().item() > 1e-3:
        raise ValueError("initial and new outputs are not equal")

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
def load_whole_bark_model(
    semantic_path,
    coarse_path,
    fine_path,
    append_text,
    hub_path,
    folder_path,
):
    pytorch_dump_folder_path = os.path.join(folder_path, append_text)

    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json"))
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json"))
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json"))
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz")

    semantic = BarkSemanticModel.from_pretrained(semantic_path)
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path)
    fineAcoustic = BarkFineModel.from_pretrained(fine_path)
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz")

    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig
    )

    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config
    )

    bark = BarkModel(bark_config)

    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec

    bark.generation_config = bark_generation_config

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("model_type", type=str, help="text, coarse or fine.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.")
    args = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 27
|
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """
    :param graph: 2D array calculated from weight[edge[i, j]]
    :param v: number of vertices
    :return dist: shortest distance between every pair of vertices
    """
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
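# Programmatic usage sketch (hypothetical 3-vertex graph, not part of the original):
#
#     INF = float("inf")
#     example_graph = [
#         [0.0, 2.0, INF],
#         [1.0, 0.0, INF],
#         [INF, INF, 0.0],
#     ]
#     dist, _ = floyd_warshall(example_graph, 3)  # dist[0][1] == 2.0, dist[1][0] == 1.0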
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 27
| 1
|
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        "Finds and returns the selected character if it exists in the handler"
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds KeyHandler metaclass to the class"""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
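# Usage sketch (hypothetical menu class; mirrors how these helpers are meant to be used):
#
#     @register
#     class Menu:
#         @mark(KEYMAP["up"])
#         def move_up(cls):
#             ...
#
#         @mark_multiple(KEYMAP["esc"], KEYMAP["interrupt"])
#         def quit(cls):
#             ...
#
# Menu.handle_input() reads one key and dispatches to the matching marked handler.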
| 721
|
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config


def load_vqgan(device, conf_path=None, ckpt_path=None):
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model


def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec


def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)


def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))


def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}


def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
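# Usage sketch (hypothetical paths/config, following the taming-transformers convention):
#
#     config = load_config("./model_checkpoints/vqgan_only.yaml", display=True)
#     model = load_vqgan("cuda", conf_path="./model_checkpoints/vqgan_only.yaml",
#                        ckpt_path="./model_checkpoints/vqgan_only.pt")
#
# instantiate_from_config expects a dict with a "target" dotted path plus optional "params":
#     {"target": "taming.models.vqgan.VQModel", "params": {...}}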
| 658
| 0
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
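# Illustrative subclass (hypothetical, not part of the library): concrete commands
# implement register_subcommand to attach their argparse flags and run to execute.
#
#     class HelloCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser):
#             hello_parser = parser.add_parser("hello")
#             hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#         def run(self):
#             print("hello")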
| 464
|
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data
    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def _A ( ):
from doctest import testmod
testmod()
lowercase__ = LinkedList()
linked_list.insert_head(input("""Inserting 1st at head """ ).strip() )
linked_list.insert_head(input("""Inserting 2nd at head """ ).strip() )
print("""\nPrint list:""" )
linked_list.print_list()
linked_list.insert_tail(input("""\nInserting 1st at tail """ ).strip() )
linked_list.insert_tail(input("""Inserting 2nd at tail """ ).strip() )
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nDelete head""" )
linked_list.delete_head()
print("""Delete tail""" )
linked_list.delete_tail()
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nReverse linked list""" )
linked_list.reverse()
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nString representation of linked list:""" )
print(lowercase__ )
print("""\nReading/changing Node data using indexing:""" )
print(f'''Element at Position 1: {linked_list[1]}''' )
lowercase__ = input("""Enter New Value: """ ).strip()
print("""New list:""" )
print(lowercase__ )
print(f'''length of linked_list is : {len(lowercase__ )}''' )
if __name__ == "__main__":
main()
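
# A minimal, readable sketch of the iterative in-place reversal that reverse()
# above implements. The descriptive names and the tiny node class are
# illustrative additions, not from the original file.
class _SketchNode:
    def __init__(self, data, next_node=None):
        self.data = data
        self.next = next_node

def reverse_singly_linked(head):
    prev = None
    current = head
    while current:
        next_node = current.next  # remember the rest of the list
        current.next = prev       # point this node backwards
        prev = current            # step prev forward
        current = next_node       # step current forward
    return prev                   # the old tail is the new head

head = _SketchNode(1, _SketchNode(2, _SketchNode(3)))
head = reverse_singly_linked(head)
print(head.data)  # 3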
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _A ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : int , lowerCamelCase : Optional[int] )-> str:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
snake_case__ : Any = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(lowerCamelCase )
def __lowerCAmelCase ( self : Optional[int] )-> Optional[int]:
snake_case__ : Dict = """sshleifer/tiny-gpt2"""
snake_case__ : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
snake_case__ : List[Any] = PyTorchBenchmark(lowerCamelCase )
snake_case__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self : int )-> List[str]:
snake_case__ : Tuple = """sgugger/tiny-distilbert-classification"""
snake_case__ : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , only_pretrain_model=lowerCamelCase , )
snake_case__ : Any = PyTorchBenchmark(lowerCamelCase )
snake_case__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self : Optional[Any] )-> str:
snake_case__ : str = """sshleifer/tiny-gpt2"""
snake_case__ : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , torchscript=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
snake_case__ : Tuple = PyTorchBenchmark(lowerCamelCase )
snake_case__ : Tuple = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def __lowerCAmelCase ( self : List[str] )-> Dict:
snake_case__ : Dict = """sshleifer/tiny-gpt2"""
snake_case__ : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , fpaa=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
snake_case__ : Tuple = PyTorchBenchmark(lowerCamelCase )
snake_case__ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self : List[str] )-> Union[str, Any]:
snake_case__ : Any = """sshleifer/tiny-gpt2"""
snake_case__ : int = AutoConfig.from_pretrained(lowerCamelCase )
# set architectures equal to `None`
snake_case__ : List[str] = None
snake_case__ : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
snake_case__ : Dict = PyTorchBenchmark(lowerCamelCase , configs=[config] )
snake_case__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self : Optional[Any] )-> Dict:
snake_case__ : Union[str, Any] = """sshleifer/tiny-gpt2"""
snake_case__ : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
snake_case__ : Union[str, Any] = PyTorchBenchmark(lowerCamelCase )
snake_case__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def __lowerCAmelCase ( self : Optional[Any] )-> int:
snake_case__ : List[Any] = """sshleifer/tiny-gpt2"""
snake_case__ : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=lowerCamelCase , multi_process=lowerCamelCase , )
snake_case__ : Optional[int] = PyTorchBenchmark(lowerCamelCase )
snake_case__ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCAmelCase ( self : str )-> Any:
snake_case__ : List[str] = """sshleifer/tiny-gpt2"""
snake_case__ : Union[str, Any] = AutoConfig.from_pretrained(lowerCamelCase )
snake_case__ : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
snake_case__ : Any = PyTorchBenchmark(lowerCamelCase , configs=[config] )
snake_case__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self : List[Any] )-> Union[str, Any]:
snake_case__ : List[str] = """sshleifer/tinier_bart"""
snake_case__ : Tuple = AutoConfig.from_pretrained(lowerCamelCase )
snake_case__ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
snake_case__ : Tuple = PyTorchBenchmark(lowerCamelCase , configs=[config] )
snake_case__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self : List[Any] )-> str:
snake_case__ : List[str] = """sshleifer/tiny-gpt2"""
snake_case__ : Optional[int] = AutoConfig.from_pretrained(lowerCamelCase )
snake_case__ : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
snake_case__ : Optional[int] = PyTorchBenchmark(lowerCamelCase , configs=[config] )
snake_case__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCAmelCase ( self : Tuple )-> List[Any]:
snake_case__ : int = """sshleifer/tinier_bart"""
snake_case__ : Dict = AutoConfig.from_pretrained(lowerCamelCase )
snake_case__ : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=lowerCamelCase , )
snake_case__ : int = PyTorchBenchmark(lowerCamelCase , configs=[config] )
snake_case__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCAmelCase ( self : List[Any] )-> Union[str, Any]:
snake_case__ : str = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , save_to_csv=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(lowerCamelCase , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(lowerCamelCase , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(lowerCamelCase , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(lowerCamelCase , """train_time.csv""" ) , env_info_csv_file=os.path.join(lowerCamelCase , """env.csv""" ) , multi_process=lowerCamelCase , )
snake_case__ : int = PyTorchBenchmark(lowerCamelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(lowerCamelCase , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(lowerCamelCase , """env.csv""" ) ).exists() )
def __lowerCAmelCase ( self : Optional[Any] )-> List[str]:
snake_case__ : Tuple = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(lowerCamelCase : Dict ):
self.assertTrue(hasattr(lowerCamelCase , """sequential""" ) )
self.assertTrue(hasattr(lowerCamelCase , """cumulative""" ) )
self.assertTrue(hasattr(lowerCamelCase , """current""" ) )
self.assertTrue(hasattr(lowerCamelCase , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=lowerCamelCase , inference=lowerCamelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(lowerCamelCase , """log.txt""" ) , log_print=lowerCamelCase , trace_memory_line_by_line=lowerCamelCase , multi_process=lowerCamelCase , )
snake_case__ : List[str] = PyTorchBenchmark(lowerCamelCase )
snake_case__ : Dict = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(lowerCamelCase , """log.txt""" ) ).exists() )
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _A :
'''simple docstring'''
def __init__( self : Optional[int] , lowerCamelCase : str , lowerCamelCase : int=13 , lowerCamelCase : Union[str, Any]=7 , lowerCamelCase : Optional[int]=True , lowerCamelCase : Any=True , lowerCamelCase : Optional[Any]=True , lowerCamelCase : Any=True , lowerCamelCase : Tuple=99 , lowerCamelCase : Optional[Any]=32 , lowerCamelCase : Tuple=2 , lowerCamelCase : Dict=4 , lowerCamelCase : Tuple=37 , lowerCamelCase : Dict="gelu" , lowerCamelCase : str=0.1 , lowerCamelCase : str=0.1 , lowerCamelCase : List[Any]=512 , lowerCamelCase : Union[str, Any]=16 , lowerCamelCase : Tuple=2 , lowerCamelCase : Union[str, Any]=0.02 , lowerCamelCase : Optional[Any]=3 , lowerCamelCase : Tuple=4 , lowerCamelCase : Union[str, Any]=None , )-> List[Any]:
snake_case__ : str = parent
snake_case__ : Optional[int] = 13
snake_case__ : List[str] = 7
snake_case__ : Tuple = True
snake_case__ : List[str] = True
snake_case__ : List[str] = True
snake_case__ : Tuple = True
snake_case__ : List[str] = 99
snake_case__ : str = 384
snake_case__ : int = 2
snake_case__ : int = 4
snake_case__ : str = 37
snake_case__ : Optional[Any] = """gelu"""
snake_case__ : Dict = 0.1
snake_case__ : str = 0.1
snake_case__ : str = 512
snake_case__ : List[Any] = 16
snake_case__ : List[Any] = 2
snake_case__ : str = 0.02
snake_case__ : int = 3
snake_case__ : int = 4
snake_case__ : Optional[int] = 128
snake_case__ : Tuple = 2
snake_case__ : str = 9
snake_case__ : Optional[int] = 1
snake_case__ : str = None
def __lowerCAmelCase ( self : List[str] )-> Union[str, Any]:
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : int = None
if self.use_input_mask:
snake_case__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : Union[str, Any] = None
if self.use_token_type_ids:
snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : Union[str, Any] = None
snake_case__ : Optional[Any] = None
snake_case__ : Optional[int] = None
if self.use_labels:
snake_case__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : Any = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ : Union[str, Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowerCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : str , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] )-> Optional[int]:
snake_case__ : str = TFConvBertModel(config=lowerCamelCase )
snake_case__ : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
snake_case__ : Optional[Any] = [input_ids, input_mask]
snake_case__ : Optional[Any] = model(lowerCamelCase )
snake_case__ : Any = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : Dict , lowerCamelCase : Any , lowerCamelCase : Dict , lowerCamelCase : List[str] , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : int , lowerCamelCase : Optional[int] )-> Tuple:
snake_case__ : str = TFConvBertForMaskedLM(config=lowerCamelCase )
snake_case__ : Union[str, Any] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
snake_case__ : Dict = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] , lowerCamelCase : Dict , lowerCamelCase : Dict , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] )-> Optional[int]:
snake_case__ : Optional[int] = self.num_labels
snake_case__ : List[str] = TFConvBertForSequenceClassification(config=lowerCamelCase )
snake_case__ : Tuple = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
snake_case__ : Any = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self : Dict , lowerCamelCase : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict , lowerCamelCase : str , lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple )-> Dict:
snake_case__ : Optional[Any] = self.num_choices
snake_case__ : Tuple = TFConvBertForMultipleChoice(config=lowerCamelCase )
snake_case__ : Optional[Any] = tf.tile(tf.expand_dims(lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case__ : Dict = tf.tile(tf.expand_dims(lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case__ : str = tf.tile(tf.expand_dims(lowerCamelCase , 1 ) , (1, self.num_choices, 1) )
snake_case__ : Any = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
snake_case__ : Any = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self : Union[str, Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : str , lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] )-> Optional[int]:
snake_case__ : str = self.num_labels
snake_case__ : Dict = TFConvBertForTokenClassification(config=lowerCamelCase )
snake_case__ : List[str] = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
snake_case__ : Tuple = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : List[str] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : Any , lowerCamelCase : str , lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : List[str] )-> Optional[Any]:
snake_case__ : int = TFConvBertForQuestionAnswering(config=lowerCamelCase )
snake_case__ : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
snake_case__ : Any = model(lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : Any )-> Tuple:
snake_case__ : Dict = self.prepare_config_and_inputs()
(
(
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) ,
) : List[Any] = config_and_inputs
snake_case__ : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class _A ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
_lowercase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_lowercase = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowercase = False
_lowercase = False
_lowercase = False
def __lowerCAmelCase ( self : str )-> Optional[Any]:
snake_case__ : Optional[Any] = TFConvBertModelTester(self )
snake_case__ : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase , hidden_size=37 )
def __lowerCAmelCase ( self : int )-> Tuple:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : int )-> Optional[Any]:
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def __lowerCAmelCase ( self : List[str] )-> Any:
snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCamelCase )
def __lowerCAmelCase ( self : Optional[Any] )-> Tuple:
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCamelCase )
def __lowerCAmelCase ( self : Optional[int] )-> Union[str, Any]:
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCamelCase )
def __lowerCAmelCase ( self : List[Any] )-> int:
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCamelCase )
def __lowerCAmelCase ( self : Union[str, Any] )-> List[str]:
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCamelCase )
@slow
def __lowerCAmelCase ( self : Optional[Any] )-> List[str]:
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : List[str] = True
snake_case__ : List[Any] = True
if hasattr(lowerCamelCase , """use_cache""" ):
snake_case__ : List[Any] = True
snake_case__ : Optional[int] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
snake_case__ : List[Any] = getattr(self.model_tester , """key_length""" , lowerCamelCase )
for model_class in self.all_model_classes:
snake_case__ : Optional[Any] = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
snake_case__ : str = model_class(lowerCamelCase )
snake_case__ : Any = len(model(lowerCamelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase , saved_model=lowerCamelCase )
snake_case__ : Any = os.path.join(lowerCamelCase , """saved_model""" , """1""" )
snake_case__ : Dict = tf.keras.models.load_model(lowerCamelCase )
snake_case__ : Any = model(lowerCamelCase )
if self.is_encoder_decoder:
snake_case__ : Optional[int] = outputs["""encoder_hidden_states"""]
snake_case__ : str = outputs["""encoder_attentions"""]
else:
snake_case__ : int = outputs["""hidden_states"""]
snake_case__ : List[Any] = outputs["""attentions"""]
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
snake_case__ : Any = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(lowerCamelCase ) , lowerCamelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCAmelCase ( self : Any )-> List[Any]:
snake_case__ : Dict = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
self.assertIsNotNone(lowerCamelCase )
def __lowerCAmelCase ( self : List[Any] )-> int:
snake_case__ , snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Union[str, Any] = True
snake_case__ : int = getattr(self.model_tester , """decoder_seq_length""" , self.model_tester.seq_length )
snake_case__ : Optional[Any] = getattr(self.model_tester , """encoder_seq_length""" , self.model_tester.seq_length )
snake_case__ : Any = getattr(self.model_tester , """key_length""" , lowerCamelCase )
snake_case__ : Any = getattr(self.model_tester , """key_length""" , lowerCamelCase )
def check_decoder_attentions_output(lowerCamelCase : List[Any] ):
snake_case__ : Tuple = len(lowerCamelCase )
self.assertEqual(out_len % 2 , 0 )
snake_case__ : str = outputs.decoder_attentions
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(lowerCamelCase : List[Any] ):
snake_case__ : List[Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
snake_case__ : Dict = True
snake_case__ : Any = False
snake_case__ : List[Any] = model_class(lowerCamelCase )
snake_case__ : int = model(self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
snake_case__ : Any = len(lowerCamelCase )
self.assertEqual(config.output_hidden_states , lowerCamelCase )
check_encoder_attentions_output(lowerCamelCase )
if self.is_encoder_decoder:
snake_case__ : List[str] = model_class(lowerCamelCase )
snake_case__ : Dict = model(self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase )
check_decoder_attentions_output(lowerCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
snake_case__ : Tuple = True
snake_case__ : Any = model_class(lowerCamelCase )
snake_case__ : str = model(self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , lowerCamelCase )
check_encoder_attentions_output(lowerCamelCase )
# Check attention is always last and order is fine
snake_case__ : List[Any] = True
snake_case__ : List[Any] = True
snake_case__ : str = model_class(lowerCamelCase )
snake_case__ : Optional[Any] = model(self._prepare_for_class(lowerCamelCase , lowerCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(lowerCamelCase ) )
self.assertEqual(model.config.output_hidden_states , lowerCamelCase )
check_encoder_attentions_output(lowerCamelCase )
@require_tf
class _A ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self : str )-> str:
snake_case__ : List[Any] = TFConvBertModel.from_pretrained("""YituTech/conv-bert-base""" )
snake_case__ : str = tf.constant([[0, 1, 2, 3, 4, 5]] )
snake_case__ : Optional[Any] = model(lowerCamelCase )[0]
snake_case__ : List[str] = [1, 6, 768]
self.assertEqual(output.shape , lowerCamelCase )
snake_case__ : int = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase , atol=1e-4 )
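
# A standalone sketch of the integration check above, reusing its checkpoint id
# and toy input; the expected hidden-state shape comes from the test itself.
import tensorflow as tf
from transformers import TFConvBertModel

model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
hidden_states = model(input_ids)[0]
print(hidden_states.shape)  # (1, 6, 768)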
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
_UpperCAmelCase : Any = '''path-to-your-trained-model'''
_UpperCAmelCase : Optional[int] = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('''cuda''')
_UpperCAmelCase : Dict = '''A photo of sks dog in a bucket'''
_UpperCAmelCase : Optional[int] = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('''dog-bucket.png''')
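
# A sketch of the same call with a seeded generator so the sample is
# reproducible. `generator` is standard diffusers API; `pipe` and `prompt` are
# assumed to be the objects built above.
generator = torch.Generator(device="cuda").manual_seed(0)
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5, generator=generator).images[0]
image.save("dog-bucket-seeded.png")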
from __future__ import annotations
def a(lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
snake_case_ , snake_case_ = array[indexa], array[indexa]
def a(lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
if length > 1:
snake_case_ = int(length / 2 )
for i in range(lowercase__ , low + middle ):
comp_and_swap(lowercase__ , lowercase__ , i + middle , lowercase__ )
bitonic_merge(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
bitonic_merge(lowercase__ , low + middle , lowercase__ , lowercase__ )
def a(lowercase__ , lowercase__ , lowercase__ , lowercase__ ):
'''simple docstring'''
if length > 1:
snake_case_ = int(length / 2 )
bitonic_sort(lowercase__ , lowercase__ , lowercase__ , 1 )
bitonic_sort(lowercase__ , low + middle , lowercase__ , 0 )
bitonic_merge(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
if __name__ == "__main__":
A = input('Enter numbers separated by a comma:\n').strip()
A = [int(item.strip()) for item in user_input.split(',')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('\nSorted array in ascending order is: ', end='')
print(*unsorted, sep=', ')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('Sorted array in descending order is: ', end='')
print(*unsorted, sep=', ')
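
# A readable, runnable sketch of the bitonic sort above with descriptive names
# (the original's identifiers are mangled, so its internal calls do not
# resolve). Direction 1 sorts ascending, 0 descending; the input length must be
# a power of two.
def comp_and_swap(array, i, j, direction):
    if (direction == 1 and array[i] > array[j]) or (direction == 0 and array[i] < array[j]):
        array[i], array[j] = array[j], array[i]

def bitonic_merge(array, low, length, direction):
    if length > 1:
        middle = length // 2
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)

def bitonic_sort(array, low, length, direction):
    if length > 1:
        middle = length // 2
        bitonic_sort(array, low, middle, 1)           # ascending half
        bitonic_sort(array, low + middle, middle, 0)  # descending half
        bitonic_merge(array, low, length, direction)  # merge the bitonic sequence

data = [12, 42, -21, 1]
bitonic_sort(data, 0, len(data), 1)
print(data)  # [-21, 1, 12, 42]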
from __future__ import annotations
import requests
def _lowerCAmelCase ( UpperCamelCase__: str ) -> dict:
"""simple docstring"""
A = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
return requests.get(UpperCamelCase__ ).json()
def _lowerCAmelCase ( UpperCamelCase__: int = 10 ) -> list[dict]:
"""simple docstring"""
A = """https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"""
A = requests.get(UpperCamelCase__ ).json()[:max_stories]
return [get_hackernews_story(UpperCamelCase__ ) for story_id in story_ids]
def _lowerCAmelCase ( UpperCamelCase__: int = 10 ) -> str:
"""simple docstring"""
A = hackernews_top_stories(UpperCamelCase__ )
return "\n".join("""* [{title}]({url})""".format(**UpperCamelCase__ ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
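
# A sketch of the same three helpers with descriptive names so the call chain
# actually resolves. It uses the public Hacker News Firebase API, as the
# original does; the timeout is an added safeguard.
import requests

def get_hackernews_story(story_id: int) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url, timeout=10).json()

def hackernews_top_stories(max_stories: int = 10) -> list:
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url, timeout=10).json()[:max_stories]
    return [get_hackernews_story(sid) for sid in story_ids]

def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    return "\n".join("* [{title}]({url})".format(**story) for story in hackernews_top_stories(max_stories))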
import sys
from collections import defaultdict
class _UpperCamelCase :
"""simple docstring"""
def __init__( self ) -> Any:
A = []
def _UpperCAmelCase ( self , a__ ) -> List[str]:
return self.node_position[vertex]
def _UpperCAmelCase ( self , a__ , a__ ) -> Optional[int]:
A = pos
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ ) -> Any:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
A = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
A = 2 * start + 1
else:
A = 2 * start + 2
if heap[smallest_child] < heap[start]:
A , A = heap[smallest_child], positions[smallest_child]
A , A = (
heap[start],
positions[start],
)
A , A = temp, tempa
A = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , a__ )
self.top_to_bottom(a__ , a__ , a__ , a__ )
def _UpperCAmelCase ( self , a__ , a__ , a__ , a__ ) -> Dict:
A = position[index]
while index != 0:
A = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
A = heap[parent]
A = position[parent]
self.set_position(position[parent] , a__ )
else:
A = val
A = temp
self.set_position(a__ , a__ )
break
A = parent
else:
A = val
A = temp
self.set_position(a__ , 0 )
def _UpperCAmelCase ( self , a__ , a__ ) -> Optional[int]:
A = len(a__ ) // 2 - 1
for i in range(a__ , -1 , -1 ):
self.top_to_bottom(a__ , a__ , len(a__ ) , a__ )
def _UpperCAmelCase ( self , a__ , a__ ) -> Any:
A = positions[0]
A = sys.maxsize
self.top_to_bottom(a__ , 0 , len(a__ ) , a__ )
return temp
def _lowerCAmelCase ( UpperCamelCase__: List[str] ) -> List[str]:
"""simple docstring"""
A = Heap()
A = [0] * len(UpperCamelCase__ )
A = [-1] * len(UpperCamelCase__ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
A = [] # Heap of Distance of vertices from their neighboring vertex
A = []
for vertex in range(len(UpperCamelCase__ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCamelCase__ )
heap.node_position.append(UpperCamelCase__ )
A = []
A = 1
A = sys.maxsize
for neighbor, distance in adjacency_list[0]:
A = 0
A = distance
heap.heapify(UpperCamelCase__ , UpperCamelCase__ )
for _ in range(1 , len(UpperCamelCase__ ) ):
A = heap.delete_minimum(UpperCamelCase__ , UpperCamelCase__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
A = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase__ )]
):
A = distance
heap.bottom_to_top(
UpperCamelCase__ , heap.get_position(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
A = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_lowercase : Tuple = int(input("Enter number of edges: ").strip())
_lowercase : int = defaultdict(list)
for _ in range(edges_number):
_lowercase : Optional[Any] = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
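
# A compact sketch of Prim's algorithm over the same adjacency-list format
# ({vertex: [[neighbor, weight], ...]}), using the standard-library heapq
# instead of the hand-rolled heap above. Names and the demo graph are mine;
# like the original, it returns the list of tree edges.
import heapq

def prims_mst_edges(adjacency_list, start=0):
    visited = {start}
    candidate_edges = [(w, start, v) for v, w in adjacency_list[start]]
    heapq.heapify(candidate_edges)
    tree_edges = []
    while candidate_edges and len(visited) < len(adjacency_list):
        weight, u, v = heapq.heappop(candidate_edges)
        if v in visited:
            continue  # stale entry; a cheaper edge already reached v
        visited.add(v)
        tree_edges.append((u, v))
        for neighbor, w in adjacency_list[v]:
            if neighbor not in visited:
                heapq.heappush(candidate_edges, (w, v, neighbor))
    return tree_edges

graph = {0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 1]], 2: [[0, 3], [1, 1]]}
print(prims_mst_edges(graph))  # [(0, 1), (1, 2)]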
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def a_ ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : List[str] =SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_lowerCamelCase : Tuple =4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
_lowerCamelCase : Union[str, Any] =4
_lowerCamelCase : int =48
_lowerCamelCase : Optional[int] ='pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_lowerCamelCase : Dict =[6, 6, 6, 6]
_lowerCamelCase : List[Any] =60
_lowerCamelCase : Optional[Any] =[6, 6, 6, 6]
_lowerCamelCase : List[str] ='pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_lowerCamelCase : int =4
_lowerCamelCase : List[Any] ='nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
_lowerCamelCase : Optional[int] =1
_lowerCamelCase : List[Any] =1
_lowerCamelCase : Union[str, Any] =126
_lowerCamelCase : int =7
_lowerCamelCase : Any =255.0
_lowerCamelCase : Dict =''
return config
def a_ ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Dict ):
'''simple docstring'''
if "patch_embed.proj" in name and "layers" not in name:
_lowerCamelCase : Union[str, Any] =name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
_lowerCamelCase : str =name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
_lowerCamelCase : Union[str, Any] =name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
_lowerCamelCase : Dict =name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
_lowerCamelCase : Any =name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
_lowerCamelCase : Any =name.replace('attn' , 'attention.self' )
if "norm1" in name:
_lowerCamelCase : Dict =name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
_lowerCamelCase : Optional[int] =name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
_lowerCamelCase : Optional[Any] =name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
_lowerCamelCase : Any =name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
_lowerCamelCase : Union[str, Any] =name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
_lowerCamelCase : Dict =name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
_lowerCamelCase : List[Any] =name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
_lowerCamelCase : int =name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
_lowerCamelCase : int =name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
_lowerCamelCase : Union[str, Any] ='layernorm.weight'
if name == "norm.bias":
_lowerCamelCase : int ='layernorm.bias'
if "conv_first" in name:
_lowerCamelCase : Optional[Any] =name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
_lowerCamelCase : List[Any] =name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
_lowerCamelCase : Any =name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
_lowerCamelCase : Union[str, Any] =name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
_lowerCamelCase : Optional[Any] =name.replace('upsample.2' , 'upsample.convolution_1' )
_lowerCamelCase : Optional[int] ='upsample.' + name
elif config.upsampler == "pixelshuffledirect":
_lowerCamelCase : Tuple =name.replace('upsample.0.weight' , 'upsample.conv.weight' )
_lowerCamelCase : int =name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
_lowerCamelCase : List[str] ='swin2sr.' + name
return name
def a_ ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
_lowerCamelCase : Any =orig_state_dict.pop(lowercase__ )
if "qkv" in key:
_lowerCamelCase : Optional[Any] =key.split('.' )
_lowerCamelCase : Tuple =int(key_split[1] )
_lowerCamelCase : Optional[int] =int(key_split[4] )
_lowerCamelCase : List[str] =config.embed_dim
if "weight" in key:
_lowerCamelCase : str =val[:dim, :]
_lowerCamelCase : Any =val[dim : dim * 2, :]
_lowerCamelCase : Union[str, Any] =val[-dim:, :]
else:
_lowerCamelCase : str =val[:dim]
_lowerCamelCase : List[str] =val[dim : dim * 2]
_lowerCamelCase : str =val[-dim:]
pass
else:
_lowerCamelCase : Dict =val
return orig_state_dict
def a_ ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
_lowerCamelCase : Union[str, Any] =get_config(lowercase__ )
_lowerCamelCase : str =SwinaSRForImageSuperResolution(lowercase__ )
model.eval()
_lowerCamelCase : Dict =torch.hub.load_state_dict_from_url(lowercase__ , map_location='cpu' )
_lowerCamelCase : List[str] =convert_state_dict(lowercase__ , lowercase__ )
_lowerCamelCase , _lowerCamelCase : Dict =model.load_state_dict(lowercase__ , strict=lowercase__ )
if len(lowercase__ ) > 0:
raise ValueError('Missing keys when converting: {}'.format(lowercase__ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F'''Unexpected key {key} in state_dict''' )
# verify values
_lowerCamelCase : int ='https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
_lowerCamelCase : Optional[int] =Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ).convert('RGB' )
_lowerCamelCase : Union[str, Any] =SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
_lowerCamelCase : List[Any] =126 if 'Jpeg' in checkpoint_url else 256
_lowerCamelCase : Union[str, Any] =Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
_lowerCamelCase : int =transforms(lowercase__ ).unsqueeze(0 )
if config.num_channels == 1:
_lowerCamelCase : Optional[Any] =pixel_values[:, 0, :, :].unsqueeze(1 )
_lowerCamelCase : List[Any] =model(lowercase__ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
_lowerCamelCase : Any =torch.Size([1, 3, 512, 512] )
_lowerCamelCase : Union[str, Any] =torch.tensor(
[[-0.70_87, -0.71_38, -0.67_21], [-0.83_40, -0.80_95, -0.72_98], [-0.91_49, -0.84_14, -0.79_40]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_lowerCamelCase : str =torch.Size([1, 3, 1_024, 1_024] )
_lowerCamelCase : Optional[Any] =torch.tensor(
[[-0.77_75, -0.81_05, -0.89_33], [-0.77_64, -0.83_56, -0.92_25], [-0.79_76, -0.86_86, -0.95_79]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
_lowerCamelCase : Optional[Any] =torch.Size([1, 3, 1_024, 1_024] )
_lowerCamelCase : List[Any] =torch.tensor(
[[-0.80_35, -0.75_04, -0.74_91], [-0.85_38, -0.81_24, -0.77_82], [-0.88_04, -0.86_51, -0.84_93]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_lowerCamelCase : Optional[int] =torch.Size([1, 3, 512, 512] )
_lowerCamelCase : int =torch.tensor(
[[-0.76_69, -0.86_62, -0.87_67], [-0.88_10, -0.99_62, -0.98_20], [-0.93_40, -1.03_22, -1.11_49]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_lowerCamelCase : Dict =torch.Size([1, 3, 1_024, 1_024] )
_lowerCamelCase : Tuple =torch.tensor(
[[-0.52_38, -0.55_57, -0.63_21], [-0.60_16, -0.59_03, -0.63_91], [-0.62_44, -0.63_34, -0.68_89]] )
assert (
outputs.reconstruction.shape == expected_shape
), F'''Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'''
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowercase__ , atol=1e-3 )
print('Looks ok!' )
_lowerCamelCase : Dict ={
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
_lowerCamelCase : List[Any] =url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(lowercase__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(lowercase__ )
if push_to_hub:
model.push_to_hub(F'''caidas/{model_name}''' )
processor.push_to_hub(F'''caidas/{model_name}''' )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
type=str,
help='URL of the original Swin2SR checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')
lowerCamelCase = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
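
# Example invocation, assuming this script is saved as
# convert_swin2sr_original_to_pytorch.py (the filename is hypothetical); the
# flags are the ones defined by the argparse parser above.
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2SR-classical-sr-x2-64 \
#       --push_to_hub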
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a_ = False
class __lowerCAmelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCAmelCase )
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained(__UpperCAmelCase , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = generator.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt='''first prompt''' , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowerCamelCase ( self ):
'''simple docstring'''
__lowerCamelCase = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
__lowerCamelCase = '''cyberpunk 2077'''
__lowerCamelCase = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.dual_guided(
prompt=__UpperCAmelCase , image=__UpperCAmelCase , text_to_image_strength=0.75 , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowerCamelCase = '''A painting of a squirrel eating a burger '''
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = pipe.text_to_image(
prompt=__UpperCAmelCase , generator=__UpperCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
__lowerCamelCase = pipe.image_variation(__UpperCAmelCase , generator=__UpperCAmelCase , output_type='''numpy''' ).images
__lowerCamelCase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowerCamelCase = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
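
# A minimal sketch of dual-guided generation outside the test harness,
# mirroring the calls above. `dual_guided` and its keyword arguments are the
# ones exercised by the tests; fp16 and the PIL default output are assumptions.
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils import load_image

pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
pipe.to("cuda")
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
)
generator = torch.manual_seed(0)
image = pipe.dual_guided(
    prompt="cyberpunk 2077",
    image=init_image,
    text_to_image_strength=0.75,
    generator=generator,
    guidance_scale=7.5,
    num_inference_steps=50,
).images[0]
image.save("dual_guided.png")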
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(self, vocab_size=30522, hidden_size=768, num_attention_heads=12, num_qa_labels=9500, num_object_labels=1600, num_attr_labels=400, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, l_layers=9, x_layers=5, r_layers=5, visual_feat_dim=2048, visual_pos_dim=4, visual_loss_normalizer=6.67, task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True, visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
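# A quick sanity check of the defaults above; a sketch, assuming the module is
# imported through the transformers package (the relative imports require it):
#
#     from transformers import LxmertConfig
#
#     config = LxmertConfig()
#     config.num_hidden_layers   # {"vision": 5, "cross_encoder": 5, "language": 9}
#     config.num_qa_labels       # 9500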
| 622
| 0
|
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30_001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)

    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

    def test_sequence_builders(self):
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_a = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id],
            encoded_pair,
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase_ = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowercase_ , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
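# A minimal sketch of exercising the slow/fast parity checked above outside the
# test suite; assumes sentencepiece is installed and the hub checkpoint is reachable.
#
#     from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
#
#     slow = DebertaVaTokenizer.from_pretrained("microsoft/deberta-v2-xlarge")
#     fast = DebertaVaTokenizerFast.from_pretrained("microsoft/deberta-v2-xlarge")
#     text = "I was born in 92000, and this is falsé."
#     assert slow.encode(text) == fast.encode(text)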
| 451
|
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search over array[left:right]; returns -1 if the element is not found."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search; falls back to linear search on small ranges."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search; falls back to linear search on small ranges."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)

    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
| 451
| 1
|
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)

    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"""csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"""json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"""pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"""parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"""arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"""text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"""imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"""audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
""".csv""": ("""csv""", {}),
""".tsv""": ("""csv""", {"""sep""": """\t"""}),
""".json""": ("""json""", {}),
""".jsonl""": ("""json""", {}),
""".parquet""": ("""parquet""", {}),
""".arrow""": ("""arrow""", {}),
""".txt""": ("""text""", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
snake_case = {"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 704
|
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--stats_path""", required=True, default=None, type=str, help="""Path to stats.npy file""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
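# Example invocation; the script name and every path below are hypothetical and
# only illustrate the required arguments wired up by the argparse block above.
#
#     python convert_hifigan.py \
#         --checkpoint_path ./hifigan/checkpoint.pkl \
#         --stats_path ./hifigan/stats.npy \
#         --pytorch_dump_folder_path ./speecht5_hifigan \
#         --push_to_hub my-user/speecht5-hifigan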
| 568
| 0
|
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Returns True if it is safe to place a queen at (row, column) given the board."""
    # check the row and column already filled so far
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # check both upper diagonals
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Backtracking solver: tries every column in the current row and recurses."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Prints a board with Q for queens and . for empty squares."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
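# The solver mutates one shared board and appends a reference to it, so `solution`
# is best read for its length. A sketch of counting solutions for a smaller board
# (n = 4 has exactly 2 placements; each is printed as it is found):
#
#     solution.clear()
#     small = [[0 for _ in range(4)] for _ in range(4)]
#     solve(small, 0)
#     print(len(solution))  # 2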
| 410
|
import math
def _lowercase ( __SCREAMING_SNAKE_CASE ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _lowercase ( __SCREAMING_SNAKE_CASE = 1_0001 ) -> int:
try:
UpperCamelCase__ : Any = int(__SCREAMING_SNAKE_CASE )
except (TypeError, ValueError):
raise TypeError('Parameter nth must be int or castable to int.' ) from None
if nth <= 0:
raise ValueError('Parameter nth must be greater than or equal to one.' )
UpperCamelCase__ : list[int] = []
UpperCamelCase__ : Any = 2
while len(__SCREAMING_SNAKE_CASE ) < nth:
if is_prime(__SCREAMING_SNAKE_CASE ):
primes.append(__SCREAMING_SNAKE_CASE )
num += 1
else:
num += 1
return primes[len(__SCREAMING_SNAKE_CASE ) - 1]
if __name__ == "__main__":
print(f'''{solution() = }''')
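# Quick spot checks against well-known values (a sketch; solution() walks every
# integer, so large inputs are slow):
#
#     assert solution(1) == 2
#     assert solution(6) == 13
#     assert solution(10001) == 104743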
| 410
| 1
|
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
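# A short sketch of how `_pad` above treats `global_attention_mask`; assumes network
# access for the checkpoint download. -1 marks padded positions (local attention).
#
#     tok = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
#     enc = tok(["short", "a longer sequence here"])
#     enc["global_attention_mask"] = [[1], [1, 1, 1, 1, 1]]
#     padded = tok.pad(enc, padding=True)
#     # padded["global_attention_mask"][0] is extended with -1 up to the padded length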
| 601
|
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes at the front of the list
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping the payloads of two nodes found by value
    def swap_nodes(self, node_data_1, node_data_2):
        if node_data_1 == node_data_2:
            return
        else:
            node_1 = self.head
            while node_1 is not None and node_1.data != node_data_1:
                node_1 = node_1.next

            node_2 = self.head
            while node_2 is not None and node_2.data != node_data_2:
                node_2 = node_2.next

            if node_1 is None or node_2 is None:
                return

            node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)

    ll.print_list()

    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
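# Note that swap_nodes exchanges the payloads, not the links: O(n) for the two
# searches, O(1) for the swap itself. A one-off usage sketch:
#
#     ll = LinkedList()
#     for value in (3, 2, 1):
#         ll.push(value)    # list is now 1 2 3
#     ll.swap_nodes(1, 3)   # list is now 3 2 1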
| 601
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, crop_pct=None, resample=PILImageResampling.BILINEAR, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)

        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image, size, crop_pct, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]

        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size=None, crop_pct=None, resample=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
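# A minimal sketch of running the processor above on a dummy image; the shape
# follows from the defaults (shortest_edge=384 takes the warp-resize branch).
#
#     import numpy as np
#     processor = ConvNextImageProcessor()
#     dummy = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#     batch = processor(images=dummy, return_tensors="np")
#     print(batch["pixel_values"].shape)   # (1, 3, 384, 384)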
| 670
|
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def _lowerCAmelCase ( lowerCAmelCase_ :Any )->Any:
'''simple docstring'''
snake_case_ = list(s_dict.keys() )
for key in keys:
snake_case_ = r".*/layers_(\d+)"
snake_case_ = key
if re.match(lowerCAmelCase_ , lowerCAmelCase_ ):
snake_case_ = re.sub(r"layers_(\d+)" , r"block/\1/layer" , lowerCAmelCase_ )
snake_case_ = r"(encoder|decoder)\/"
if re.match(lowerCAmelCase_ , lowerCAmelCase_ ):
snake_case_ = re.match(lowerCAmelCase_ , lowerCAmelCase_ ).groups()
if groups[0] == "encoder":
snake_case_ = re.sub(r"/mlp/" , r"/1/mlp/" , lowerCAmelCase_ )
snake_case_ = re.sub(r"/pre_mlp_layer_norm/" , r"/1/layer_norm/" , lowerCAmelCase_ )
elif groups[0] == "decoder":
snake_case_ = re.sub(r"/mlp/" , r"/2/mlp/" , lowerCAmelCase_ )
snake_case_ = re.sub(r"/pre_mlp_layer_norm/" , r"/2/layer_norm/" , lowerCAmelCase_ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
snake_case_ = new_key.replace(lowerCAmelCase_ , lowerCAmelCase_ )
print(F'''{key} -> {new_key}''' )
snake_case_ = s_dict.pop(lowerCAmelCase_ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
snake_case_ = s_dict[
"encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
snake_case_ = s_dict[
"decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
snake_case_ = s_dict[key].shape[0]
snake_case_ = s_dict[key]
for idx in range(lowerCAmelCase_ ):
snake_case_ = expert_weihts[idx]
print(F'''{key} -> {key.replace('expert/' , 'nested fstring' )}''' )
s_dict.pop(lowerCAmelCase_ )
return s_dict
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def _lowerCAmelCase ( lowerCAmelCase_ :List[str] , lowerCAmelCase_ :Any )->Optional[int]:
'''simple docstring'''
import regex as re
with open(lowerCAmelCase_ , "r" ) as f:
snake_case_ = f.read()
snake_case_ = re.findall(r"(.*) = ([0-9.]*)" , lowerCAmelCase_ )
snake_case_ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
snake_case_ = float(lowerCAmelCase_ ) if "." in value else int(lowerCAmelCase_ )
snake_case_ = re.findall(r"(.*activations) = \(\'(.*)\',\)" , lowerCAmelCase_ )[0]
snake_case_ = str(activation[1] )
snake_case_ = num_experts
snake_case_ = SwitchTransformersConfig(**lowerCAmelCase_ )
return config
def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8):
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
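# Example invocation (script name, paths, and model size are hypothetical):
#
#     python convert_switch_transformers_flax_to_pytorch.py \
#         --switch_t5x_checkpoint_path /t5x/switch_base_8/checkpoint_500000 \
#         --gin_file /t5x/switch_base_8/config.gin \
#         --pytorch_dump_folder_path ./switch-base-8 \
#         --num_experts 8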
| 283
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
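# With the lazy module in place, the submodules above are only imported on first
# attribute access; a sketch of the intended consumption:
#
#     from transformers.models.cpmant import CpmAntConfig, CpmAntTokenizer
#     config = CpmAntConfig()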
| 702
|
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Class Vertex."""

    def __init__(self, id_):
        """
        Arguments:
            id_ - input an id to identify the vertex
        """
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm; runs in O(|V|^2)."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm with a min heap; runs in O((|E| + |V|) log |V|)."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
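# A small worked example for both variants (vertices are 1-indexed at the connect()
# call sites, matching the index arithmetic above):
#
#     graph = [Vertex(i) for i in range(3)]
#     connect(graph, 1, 2, 1)   # edge of weight 1 between vertices 0 and 1
#     connect(graph, 2, 3, 2)
#     connect(graph, 1, 3, 4)
#     print(prim(graph, graph[0]))            # [(2, 1), (3, 2)]
#     print(list(prim_heap(graph, graph[0]))) # [(2, 1), (3, 2)]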
| 296
| 0
|
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(ValueError):
            fill_masker("This is")
        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))
        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens) == set(targets):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))
        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(targets):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)
        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
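
# --- Hedged usage sketch (added for illustration; not part of the test suite) ---
# Everything the tests above exercise, in one place: `top_k` caps how many
# candidates come back per mask, `targets` restricts scoring to a fixed
# candidate set, and a prompt with several mask tokens returns one candidate
# list per mask. The checkpoint is the same tiny model the tests use; the
# prompts and the ` Paris` target are assumptions chosen only for illustration.
if __name__ == "__main__":
    demo = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
    mask = demo.tokenizer.mask_token
    print(demo(f"Paris is the {mask} of France.", top_k=2))         # two candidate dicts
    print(demo(f"My name is {mask}", targets=[" Paris"], top_k=1))  # restricted to `targets`
    print(demo(f"A {mask} and a {mask}.", top_k=1))                 # one list per mask token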
| 333
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
'''SEW_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SEWForCTC''',
'''SEWForSequenceClassification''',
'''SEWModel''',
'''SEWPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
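
# --- Hedged usage note (added for illustration; not executed) ---
# The `_LazyModule` indirection above keeps importing the package cheap: the
# heavy `modeling_sew` submodule (and therefore torch) is only imported when an
# attribute such as `SEWModel` is first accessed, e.g.:
#
#     from transformers import SEWConfig, SEWModel  # resolved lazily
#     model = SEWModel(SEWConfig())
#
# The two-line consumer above is an assumption shown for illustration only.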
| 172
| 0
|
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
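
# --- Hedged usage sketch (sample values are assumptions for illustration) ---
# The predicate implements the strict polygon inequality: side lengths can
# close into a polygon only if the longest side is shorter than the sum of
# all the others.
if __name__ == "__main__":
    assert check_polygon([6, 10, 5])         # 10 < 6 + 5      -> True
    assert not check_polygon([3, 7, 13, 2])  # 13 >= 3 + 7 + 2 -> False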
| 710
|
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 list of lists with the given shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, spectrogram_length=2048, feature_size=128, num_audio_channels=1, hop_length=512, chunk_length=30, sampling_rate=44100, ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_values = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_values, atol=1e-4))
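
# --- Hedged usage sketch (added for illustration; not part of the tests) ---
# Stand-alone use of the feature extractor exercised above: a mono waveform in,
# a log-mel spectrogram of shape (batch, num_audio_channels, spectrogram_length,
# feature_size) out. The sine-wave input is an assumption chosen only to have a
# valid one-second waveform at 44.1 kHz.
if __name__ == "__main__":
    extractor = TvltFeatureExtractor()
    waveform = np.sin(np.linspace(0, 100, 44_100)).astype(np.float32)
    audio_values = extractor(waveform, return_tensors="np", sampling_rate=44_100).audio_values
    print(audio_values.shape)  # (1, 1, 192, 128) with the default config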
| 370
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
lowerCamelCase__ : int = logging.get_logger(__name__)
lowerCamelCase__ : int = {
"""openai/whisper-base""": """https://huggingface.co/openai/whisper-base/resolve/main/config.json""",
}
# fmt: off
lowerCamelCase__ : Dict = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
lowerCamelCase__ : str = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class WhisperConfig(PretrainedConfig):
    model_type = "whisper"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self, vocab_size=51865, num_mel_bins=80, encoder_layers=6, encoder_attention_heads=4, decoder_layers=6, decoder_attention_heads=4, decoder_ffn_dim=1536, encoder_ffn_dim=1536, encoder_layerdrop=0.0, decoder_layerdrop=0.0, decoder_start_token_id=50257, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=256, dropout=0.0, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, scale_embedding=False, max_source_positions=1500, max_target_positions=448, pad_token_id=50256, bos_token_id=50256, eos_token_id=50256, suppress_tokens=None, begin_suppress_tokens=[220, 50256], use_weighted_layer_sum=False, classifier_proj_size=256, apply_spec_augment=False, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, median_filter_width=7, **kwargs, ):
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, suppress_tokens=suppress_tokens, begin_suppress_tokens=begin_suppress_tokens, **kwargs, )
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict(
            [
                ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
            ]
        )
        if self.use_past:
            common_inputs["decoder_input_ids"] = {0: "batch"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs
    def generate_dummy_inputs(self, preprocessor, batch_size=-1, seq_length=-1, is_pair=False, framework=None, sampling_rate=22050, time_duration=5.0, frequency=220, ):
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self, preprocessor=preprocessor.feature_extractor, batch_size=batch_size, framework=framework, sampling_rate=sampling_rate, time_duration=time_duration, frequency=frequency, )
        encoder_sequence_length = encoder_inputs["input_features"].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer, batch_size, seq_length, is_pair, framework)
        dummy_inputs["input_features"] = encoder_inputs.pop("input_features")
        dummy_inputs["decoder_input_ids"] = decoder_inputs.pop("decoder_input_ids")
        if "past_key_values" in decoder_inputs:
            dummy_inputs["past_key_values"] = decoder_inputs.pop("past_key_values")
        return dummy_inputs
    @property
    def atol_for_validation(self) -> float:
        return 1e-3
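
# --- Hedged usage sketch (added for illustration) ---
# Minimal round trip through the configuration defined above; the overridden
# sizes are assumptions picked only to show that `attribute_map` aliases work.
if __name__ == "__main__":
    config = WhisperConfig(d_model=384, encoder_layers=4, decoder_layers=4)
    # `num_attention_heads` is an alias for `encoder_attention_heads` via `attribute_map`
    assert config.num_attention_heads == config.encoder_attention_heads
    assert config.hidden_size == config.d_model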
| 12
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}", ):
            scheduler.set_timesteps(timesteps=timesteps)
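
# --- Hedged usage sketch (added for illustration; not part of the tests) ---
# The full-loop tests above reduce to this pattern: set the timesteps, then
# repeatedly predict the noise residual and call `scheduler.step` to walk
# x_t -> x_{t-1}. The random tensor below stands in for a real noise-prediction
# model and is an assumption for illustration.
if __name__ == "__main__":
    scheduler = DDPMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(num_inference_steps=10)
    generator = torch.manual_seed(0)
    sample = torch.randn(1, 3, 8, 8, generator=generator)
    for t in scheduler.timesteps:
        residual = torch.randn_like(sample)  # placeholder for model(sample, t)
        sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
    print(sample.shape)  # torch.Size([1, 3, 8, 8])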
| 12
| 1
|
"""simple docstring"""
import os
def solution(filename: str = "input.txt") -> int:
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(",")]
            for line in input_file.readlines()
        ]
    rows = len(matrix)
    cols = len(matrix[0])
    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )
        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(f'''{solution() = }''')
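
# --- Hedged worked example (added for illustration) ---
# Sanity check against the 5x5 grid from the Project Euler 82 statement, whose
# minimal three-way (up/down/right) path sum is 994: 201 -> 96 -> 342 -> 234
# -> 103 -> 18. Writing a throwaway file next to this script is an assumption
# made so the file-based API above can be reused; call `_run_example()` manually.
def _run_example() -> None:
    grid = (
        "131,673,234,103,18\n"
        "201,96,342,965,150\n"
        "630,803,746,422,111\n"
        "537,699,497,121,956\n"
        "805,732,524,37,331\n"
    )
    example_path = os.path.join(os.path.dirname(__file__), "pe82_example.txt")
    with open(example_path, "w") as file_handle:
        file_handle.write(grid)
    try:
        assert solution("pe82_example.txt") == 994
    finally:
        os.remove(example_path)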
| 213
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
    def test_small_model_pt(self):
        import torch  # local import, mirroring the GPU tests below

        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ],
        )
        outputs = text_generator(["This is a test", "This is a second test"])
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ],
        )
        outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )
        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = "<pad>"
        outputs = text_generator(
            ["This is a test", "This is a second test"], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True, )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )
@require_tf
    def test_small_model_tf(self):
        text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf")
        # Using `do_sample=False` to force deterministic output
        outputs = text_generator("This is a test", do_sample=False)
        self.assertEqual(
            outputs,
            [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ],
        )
        outputs = text_generator(["This is a test", "This is a second test"], do_sample=False)
        self.assertEqual(
            outputs,
            [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ],
        )
    def get_test_pipeline(self, model, tokenizer, processor):
        text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return text_generator, ["This is a test", "Another test"]
    def test_stop_sequence_stopping_criteria(self):
        prompt = "Hello I believe in"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        output = text_generator(prompt)
        self.assertEqual(
            output, [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}], )
        output = text_generator(prompt, stop_sequence=" fe")
        self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}])
    def run_pipeline_test(self, text_generator, _):
        model = text_generator.model
        tokenizer = text_generator.tokenizer
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
        outputs = text_generator("This is a test", return_full_text=False)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])
        text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False)
        outputs = text_generator("This is a test")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertNotIn("This is a test", outputs[0]["generated_text"])
        outputs = text_generator("This is a test", return_full_text=True)
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test"))
        outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )
        if text_generator.tokenizer.pad_token is not None:
            outputs = text_generator(
                ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True)
            self.assertEqual(
                outputs,
                [
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                    [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                ],
            )
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_text=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_full_text=True, return_tensors=True)
        with self.assertRaises(ValueError):
            outputs = text_generator("test", return_text=True, return_tensors=True)
        # Empty prompt is slightly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            outputs = text_generator("")
            self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        else:
            with self.assertRaises((ValueError, AssertionError)):
                outputs = text_generator("")
        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return
        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)):
                text_generator("This is a test" * 500, max_new_tokens=20)
            outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20)
            # Hole strategy cannot work
            with self.assertRaises(ValueError):
                text_generator(
                    "This is a test" * 500, handle_long_generation="hole", max_new_tokens=tokenizer.model_max_length + 10, )
@require_torch
@require_accelerate
@require_torch_gpu
    def test_pipeline_accelerate_bloom(self):
        import torch

        # Classic `model_kwargs`
        pipe = pipeline(
            model="hf-internal-testing/tiny-random-bloom", model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16}, )
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
        # Upgraded those two to real pipeline arguments (they just get sent to the model, as they're unlikely to mean anything else.)
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16)
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto")
        self.assertEqual(pipe.model.device, torch.device(0))
        self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32)
        out = pipe("This is a test")
        self.assertEqual(
            out,
            [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ],
        )
@require_torch
@require_torch_gpu
    def test_small_model_fp16(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device=0, torch_dtype=torch.float16)
        pipe("This is a test")
@require_torch
@require_accelerate
@require_torch_gpu
    def test_pipeline_accelerate_top_p(self):
        import torch

        pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.float16)
        pipe("This is a test", do_sample=True, top_p=0.5)
    def test_pipeline_length_setting_warning(self):
        prompt = "Hello world"
        text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
        if text_generator.model.framework == "tf":
            logger = logging.get_logger("transformers.generation.tf_utils")
        else:
            logger = logging.get_logger("transformers.generation.utils")
        logger_msg = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10, max_new_tokens=1)
        self.assertIn(logger_msg, cl.out)
        # The user only sets one -> no warning
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_new_tokens=1)
        self.assertNotIn(logger_msg, cl.out)
        with CaptureLogger(logger) as cl:
            _ = text_generator(prompt, max_length=10)
        self.assertNotIn(logger_msg, cl.out)
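
# --- Hedged usage sketch (added for illustration; not part of the tests) ---
# The behaviours exercised above in one short script: `max_new_tokens` bounds
# generation length, `return_full_text=False` strips the prompt, and
# `num_return_sequences` (with sampling) multiplies the outputs. The tiny
# checkpoint is the same one the tests use; the prompt is an assumption.
if __name__ == "__main__":
    demo = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2")
    print(demo("Hello I believe in", max_new_tokens=5))
    print(demo("Hello I believe in", max_new_tokens=5, return_full_text=False))
    print(demo("Hello I believe in", max_new_tokens=5, do_sample=True, num_return_sequences=2))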
| 213
| 1
|
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(self, parent, batch_size=14, seq_length=7, is_training=True, use_token_type_ids=True, use_input_mask=True, use_labels=True, use_mc_token_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()
        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)
    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
    def tearDown(self):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _A ( self : int ):
pass
@slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()
    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859,
            0,
            1611,
            8,
            5,
            150,
            26449,
            2,
            19,
            348,
            469,
            3,
            2595,
            48,
            20740,
            246533,
            246533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
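# Editor's addition -- a hedged, standalone sketch of the greedy-decoding check above.
# The "ctrl" checkpoint name and prompt text come from the test; the tokenizer round-trip
# is an illustrative assumption. The __main__ guard keeps it out of test collection.
if __name__ == "__main__":
    import torch
    from transformers import CTRLLMHeadModel, CTRLTokenizer

    tokenizer = CTRLTokenizer.from_pretrained("ctrl")
    model = CTRLLMHeadModel.from_pretrained("ctrl").eval()
    prompt_ids = tokenizer("Legal the president is", return_tensors="pt").input_ids
    with torch.no_grad():
        generated = model.generate(prompt_ids, do_sample=False)  # greedy decoding is deterministic
    print(tokenizer.decode(generated[0]))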
| 62
|
import warnings
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class SpeechT5FeatureExtractor(SequenceFeatureExtractor):
    """simple docstring"""
    model_input_names = ["input_values", "attention_mask"]
    def __init__(self, feature_size: int = 1, sampling_rate: int = 16000, padding_value: float = 0.0, do_normalize: bool = False, num_mel_bins: int = 80, hop_length: int = 16, win_length: int = 64, win_function: str = "hann_window", frame_signal_scale: float = 1.0, fmin: float = 80, fmax: float = 7600, mel_floor: float = 1e-10, reduction_factor: int = 2, return_attention_mask: bool = True, **kwargs,):
        """simple docstring"""
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.do_normalize = do_normalize
        self.return_attention_mask = return_attention_mask
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.frame_signal_scale = frame_signal_scale
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.reduction_factor = reduction_factor
        self.sample_size = win_length * sampling_rate // 1000
        self.sample_stride = hop_length * sampling_rate // 1000
        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1
        self.window = window_function(window_length=self.sample_size, name=self.win_function, periodic=True)
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.n_freqs, num_mel_filters=self.num_mel_bins, min_frequency=self.fmin, max_frequency=self.fmax, sampling_rate=self.sampling_rate, norm="slaney", mel_scale="slaney", )
        if frame_signal_scale != 1.0:
            warnings.warn(
                "The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers", FutureWarning, )
        if reduction_factor != 2.0:
            warnings.warn(
                "The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers", FutureWarning, )
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0):
        """simple docstring"""
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def _extract_mel_features(self, one_waveform: np.ndarray,):
        """simple docstring"""
        log_mel_spec = spectrogram(
            one_waveform, window=self.window, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, mel_filters=self.mel_filters, mel_floor=self.mel_floor, log_mel="log10", )
        return log_mel_spec.T
    def __call__(self, audio: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None, audio_target: Optional[Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]]] = None, padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, sampling_rate: Optional[int] = None, **kwargs,):
        """simple docstring"""
        if audio is None and audio_target is None:
            raise ValueError("You must provide either `audio` or `audio_target` values.")
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}.")
        else:
            logger.warning(
                "It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")
        if audio is not None:
            inputs = self._process_audio(
                audio, False, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, )
        else:
            inputs = None
        if audio_target is not None:
            inputs_target = self._process_audio(
                audio_target, True, padding, max_length, truncation, pad_to_multiple_of, return_attention_mask, return_tensors, **kwargs, )
            if inputs is None:
                return inputs_target
            else:
                inputs["labels"] = inputs_target["input_values"]
                decoder_attention_mask = inputs_target.get("attention_mask")
                if decoder_attention_mask is not None:
                    inputs["decoder_attention_mask"] = decoder_attention_mask
        return inputs
    def _process_audio(self, speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]], is_target: bool = False, padding: Union[bool, str, PaddingStrategy] = False, max_length: Optional[int] = None, truncation: bool = False, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs,):
        """simple docstring"""
        is_batched_numpy = isinstance(speech, np.ndarray) and len(speech.shape) > 1
        if is_batched_numpy and len(speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(speech, (list, tuple)) and (isinstance(speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            speech = [np.asarray(one_speech, dtype=np.float32) for one_speech in speech]
        elif not is_batched and not isinstance(speech, np.ndarray):
            speech = np.asarray(speech, dtype=np.float32)
        elif isinstance(speech, np.ndarray) and speech.dtype is np.dtype(np.float64):
            speech = speech.astype(np.float32)
        # always return batch
        if not is_batched:
            speech = [speech]
        # needed to make pad() work on spectrogram inputs
        feature_size_hack = self.feature_size
        # convert into correct format for padding
        if is_target:
            features = [self._extract_mel_features(waveform) for waveform in speech]
            encoded_inputs = BatchFeature({"input_values": features})
            self.feature_size = self.num_mel_bins
        else:
            encoded_inputs = BatchFeature({"input_values": speech})
        padded_inputs = self.pad(
            encoded_inputs, padding=padding, max_length=max_length, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, **kwargs, )
        self.feature_size = feature_size_hack
        # convert input values to correct format
        input_values = padded_inputs["input_values"]
        if not isinstance(input_values[0], np.ndarray):
            padded_inputs["input_values"] = [np.asarray(array, dtype=np.float32) for array in input_values]
        elif (
            not isinstance(input_values, np.ndarray)
            and isinstance(input_values[0], np.ndarray)
            and input_values[0].dtype is np.dtype(np.float64)
        ):
            padded_inputs["input_values"] = [array.astype(np.float32) for array in input_values]
        elif isinstance(input_values, np.ndarray) and input_values.dtype is np.dtype(np.float64):
            padded_inputs["input_values"] = input_values.astype(np.float32)
        # convert attention_mask to correct format
        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
        # zero-mean and unit-variance normalization
        if not is_target and self.do_normalize:
            attention_mask = (
                attention_mask
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_values"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_values"], attention_mask=attention_mask, padding_value=self.padding_value)
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self):
        """simple docstring"""
        output = super().to_dict()
        # Don't serialize these as they are derived from the other properties.
        names = ["window", "mel_filters", "sample_size", "sample_stride", "n_fft", "n_freqs"]
        for name in names:
            if name in output:
                del output[name]
        return output
return output
| 9
| 0
|
"""simple docstring"""
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True
    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)
    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    """simple docstring"""
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss
    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
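# Editor's addition -- a tiny numeric check of the one-hot cross-entropy computed above
# (uses the module-level jax/jnp imports; the logits values are illustrative).
if __name__ == "__main__":
    demo_logits = jnp.array([[2.0, 0.5, -1.0]])
    demo_labels = jnp.array([0])
    one_hot = (demo_labels[..., None] == jnp.arange(demo_logits.shape[-1])[None]).astype("f4")
    demo_loss = -jnp.sum(one_hot * jax.nn.log_softmax(demo_logits, axis=-1), axis=-1)
    print(float(demo_loss[0]))  # ~0.24, i.e. -log softmax(logits)[0]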
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3_000
    save_steps: int = 10_500
    block_size: int = 128
    num_random_blocks: int = 3
    batch_size_per_device: int = 1
    max_epochs: int = 5
    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20_000
    weight_decay: float = 0.0095
    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"
    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4_096  # no dynamic padding on TPUs
    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch
    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch
    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)
    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    """simple docstring"""
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    """simple docstring"""
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")
        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs
        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels, )
    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    """simple docstring"""
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")
    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs
    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    model_save_fn: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq,)
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state,)
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size
        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)
                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)
                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    """simple docstring"""
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))
    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]
    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    """simple docstring"""
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
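# Editor's addition -- sampling the joined schedule to show linear warmup to `lr` followed
# by linear decay toward 1e-7 (the step counts here are illustrative, not from the script).
if __name__ == "__main__":
    demo_schedule = scheduler_fn(lr=3e-5, init_lr=0.0, warmup_steps=100, num_train_steps=1000)
    for demo_step in (0, 50, 100, 550, 999):
        print(demo_step, float(demo_schedule(demo_step)))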
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    """simple docstring"""
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)
    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
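# Editor's addition -- a hedged sketch of how `Args` and `build_tx` compose; the
# num_train_steps value is illustrative, not taken from the original script.
if __name__ == "__main__":
    demo_args = Args()
    demo_tx, demo_lr = build_tx(
        lr=demo_args.lr, init_lr=demo_args.init_lr, warmup_steps=demo_args.warmup_steps,
        num_train_steps=10_000, weight_decay=demo_args.weight_decay,
    )
    print(type(demo_tx).__name__, float(demo_lr(0)))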
| 74
|
"""simple docstring"""
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
_UpperCamelCase = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
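# Editor's addition -- a usage sketch of the public API re-exported above; "imdb" is an
# illustrative dataset id, not something referenced by this file.
if __name__ == "__main__":
    ds = load_dataset("imdb", split="train[:1%]")
    doubled = concatenate_datasets([ds, ds])
    print(len(ds), len(doubled))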
| 74
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        [])  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        """simple docstring"""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, hidden_act="gelu", projection_dim=512, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        """simple docstring"""
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_single_identical(self):
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        """simple docstring"""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy")
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        """simple docstring"""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy")
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        """simple docstring"""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np", )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
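# Editor's addition -- the VRAM-bounding pattern used above, isolated as a hedged sketch:
# reset CUDA peak stats, run a pipeline, then read the high-water mark.
if __name__ == "__main__":
    torch.cuda.reset_peak_memory_stats()
    # ... run any diffusers pipeline here ...
    print(f"peak VRAM: {torch.cuda.max_memory_allocated() / 2**30:.2f} GiB")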
| 69
|
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]
            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample
        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample
        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            sample = self.dummy_sample
            residual = 0.1 * sample
            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]
            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample
            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps, torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]), )
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)
    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)
    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27
        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample
    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3
    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3
    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3
    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
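# Editor's addition -- a standalone sketch of the PRK/PLMS two-phase loop exercised by
# `full_loop` above, with a zero-residual stand-in for a real eps-prediction model.
if __name__ == "__main__":
    demo_scheduler = PNDMScheduler(num_train_timesteps=1_000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
    demo_scheduler.set_timesteps(10)
    demo_sample = torch.randn(1, 3, 8, 8)
    for t in demo_scheduler.prk_timesteps:
        demo_sample = demo_scheduler.step_prk(torch.zeros_like(demo_sample), t, demo_sample).prev_sample
    for t in demo_scheduler.plms_timesteps:
        demo_sample = demo_scheduler.step_plms(torch.zeros_like(demo_sample), t, demo_sample).prev_sample
    print(demo_sample.shape)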
| 367
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
    DiffusionPipeline,
    UnCLIPImageVariationPipeline,
    UnCLIPScheduler,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {'height', 'width', 'guidance_scale'}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        'generator',
        'return_dict',
        'decoder_num_inference_steps',
        'super_res_num_inference_steps',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32
    @property
    def time_input_dim(self):
        return 32
    @property
    def block_out_channels_0(self):
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        return 100
    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''')
        return tokenizer
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, )
        return CLIPVisionModelWithProjection(config)
    @property
    def dummy_text_proj(self):
        torch.manual_seed(0)
        model_kwargs = {
            '''clip_embeddings_dim''': self.text_embedder_hidden_size,
            '''time_embed_dim''': self.time_embed_dim,
            '''cross_attention_dim''': self.cross_attention_dim,
        }
        model = UnCLIPTextProjModel(**model_kwargs)
        return model
    @property
    def dummy_decoder(self):
        torch.manual_seed(0)
        model_kwargs = {
            '''sample_size''': 32,
            # RGB in channels
            '''in_channels''': 3,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 6,
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_0, self.block_out_channels_0 * 2),
            '''layers_per_block''': 1,
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': '''identity''',
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_super_res_kwargs(self):
        return {
            "sample_size": 64,
            "layers_per_block": 1,
            "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
            "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "in_channels": 6,
            "out_channels": 3,
        }
    @property
    def dummy_super_res_first(self):
        torch.manual_seed(0)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model
    @property
    def dummy_super_res_last(self):
        # seeded differently to get different unet than `self.dummy_super_res_first`
        torch.manual_seed(1)
        model = UNet2DModel(**self.dummy_super_res_kwargs)
        return model
    def get_dummy_components(self):
        decoder = self.dummy_decoder
        text_proj = self.dummy_text_proj
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        super_res_first = self.dummy_super_res_first
        super_res_last = self.dummy_super_res_last
        decoder_scheduler = UnCLIPScheduler(
            variance_type='''learned_range''', prediction_type='''epsilon''', num_train_timesteps=1000, )
        super_res_scheduler = UnCLIPScheduler(
            variance_type='''fixed_small_log''', prediction_type='''epsilon''', num_train_timesteps=1000, )
        feature_extractor = CLIPImageProcessor(crop_size=32, size=32)
        image_encoder = self.dummy_image_encoder
        return {
            "decoder": decoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_proj": text_proj,
            "feature_extractor": feature_extractor,
            "image_encoder": image_encoder,
            "super_res_first": super_res_first,
            "super_res_last": super_res_last,
            "decoder_scheduler": decoder_scheduler,
            "super_res_scheduler": super_res_scheduler,
        }
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith('''mps'''):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]
        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''cpu'''
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = self.pipeline_class(**_lowerCamelCase )
lowerCAmelCase_ = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowerCAmelCase_ = self.get_dummy_inputs(_lowerCamelCase , pil_image=_lowerCamelCase )
lowerCAmelCase_ = pipe(**_lowerCamelCase )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = self.get_dummy_inputs(_lowerCamelCase , pil_image=_lowerCamelCase )
lowerCAmelCase_ = pipe(
**_lowerCamelCase , return_dict=_lowerCamelCase , )[0]
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ = np.array(
[
0.99_97,
0.00_02,
0.99_97,
0.99_97,
0.99_69,
0.00_23,
0.99_97,
0.99_69,
0.99_70,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''cpu'''
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = self.pipeline_class(**_lowerCamelCase )
lowerCAmelCase_ = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowerCAmelCase_ = self.get_dummy_inputs(_lowerCamelCase , pil_image=_lowerCamelCase )
lowerCAmelCase_ = pipe(**_lowerCamelCase )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = self.get_dummy_inputs(_lowerCamelCase , pil_image=_lowerCamelCase )
lowerCAmelCase_ = pipe(
**_lowerCamelCase , return_dict=_lowerCamelCase , )[0]
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase_ = np.array([0.99_97, 0.00_03, 0.99_97, 0.99_97, 0.99_70, 0.00_24, 0.99_97, 0.99_71, 0.99_71] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = '''cpu'''
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = self.pipeline_class(**_lowerCamelCase )
lowerCAmelCase_ = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowerCAmelCase_ = self.get_dummy_inputs(_lowerCamelCase , pil_image=_lowerCamelCase )
lowerCAmelCase_ = [
pipeline_inputs['''image'''],
pipeline_inputs['''image'''],
]
lowerCAmelCase_ = pipe(**_lowerCamelCase )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = self.get_dummy_inputs(_lowerCamelCase , pil_image=_lowerCamelCase )
lowerCAmelCase_ = [
tuple_pipeline_inputs['''image'''],
tuple_pipeline_inputs['''image'''],
]
lowerCAmelCase_ = pipe(
**_lowerCamelCase , return_dict=_lowerCamelCase , )[0]
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
lowerCAmelCase_ = np.array(
[
0.99_97,
0.99_89,
0.00_08,
0.00_21,
0.99_60,
0.00_18,
0.00_14,
0.00_02,
0.99_33,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = torch.device('''cpu''' )
class __UpperCAmelCase :
__A : Union[str, Any] = 1
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = self.pipeline_class(**_lowerCamelCase )
lowerCAmelCase_ = pipe.to(_lowerCamelCase )
pipe.set_progress_bar_config(disable=_lowerCamelCase )
lowerCAmelCase_ = torch.Generator(device=_lowerCamelCase ).manual_seed(0 )
lowerCAmelCase_ = pipe.decoder.dtype
lowerCAmelCase_ = 1
lowerCAmelCase_ = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
lowerCAmelCase_ = pipe.prepare_latents(
_lowerCamelCase , dtype=_lowerCamelCase , device=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , scheduler=DummyScheduler() )
lowerCAmelCase_ = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
lowerCAmelCase_ = pipe.prepare_latents(
_lowerCamelCase , dtype=_lowerCamelCase , device=_lowerCamelCase , generator=_lowerCamelCase , latents=_lowerCamelCase , scheduler=DummyScheduler() )
lowerCAmelCase_ = self.get_dummy_inputs(_lowerCamelCase , pil_image=_lowerCamelCase )
lowerCAmelCase_ = pipe(
**_lowerCamelCase , decoder_latents=_lowerCamelCase , super_res_latents=_lowerCamelCase ).images
lowerCAmelCase_ = self.get_dummy_inputs(_lowerCamelCase , pil_image=_lowerCamelCase )
# Don't pass image, instead pass embedding
lowerCAmelCase_ = pipeline_inputs.pop('''image''' )
lowerCAmelCase_ = pipe.image_encoder(_lowerCamelCase ).image_embeds
lowerCAmelCase_ = pipe(
**_lowerCamelCase , decoder_latents=_lowerCamelCase , super_res_latents=_lowerCamelCase , image_embeddings=_lowerCamelCase , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = torch_device == '''cpu'''
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
lowerCAmelCase_ = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=_lowerCamelCase , expected_max_diff=_lowerCamelCase )
@skip_mps
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = torch_device == '''cpu'''
lowerCAmelCase_ = True
lowerCAmelCase_ = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
self._test_inference_batch_single_identical(
test_max_difference=_lowerCamelCase , relax_max_difference=_lowerCamelCase , additional_params_copy_to_batched_inputs=_lowerCamelCase , )
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = [
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
lowerCAmelCase_ = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=_lowerCamelCase , additional_params_copy_to_batched_inputs=_lowerCamelCase , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=_lowerCamelCase )
@skip_mps
def UpperCAmelCase_ ( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def UpperCAmelCase_ ( self ):
return super().test_save_load_local()
@skip_mps
def UpperCAmelCase_ ( self ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png''')
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/unclip/karlo_v1_alpha_cat_variation_fp16.npy''')
        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            '''kakaobrain/karlo-v1-alpha-image-variations''', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device='''cpu''').manual_seed(0)
        output = pipeline(
            input_image, generator=generator, output_type='''np''', )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(image, expected_image, 15)
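# Editor's addition -- the slow test above reduced to a hedged usage sketch; the checkpoint
# and image URL come from the test, while the "cuda" device choice is an assumption.
if __name__ == "__main__":
    demo_pipe = UnCLIPImageVariationPipeline.from_pretrained(
        "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
    ).to("cuda")
    demo_image = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png")
    demo_out = demo_pipe(demo_image, generator=torch.Generator(device="cpu").manual_seed(0), output_type="np")
    print(demo_out.images[0].shape)  # (256, 256, 3)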
| 606
|
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
A_ : List[str] =logging.get_logger(__name__)
A_ : int ={
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
A_ : Dict =[
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split('''.'''):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''')
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''')
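# Editor's addition -- the dotted-path traversal used by `set_recursively`, shown in
# isolation on a hypothetical two-level object tree.
if __name__ == "__main__":
    class _Leaf:
        weight = "w"
    class _Root:
        encoder = _Leaf()
    pointer = _Root()
    for attribute in "encoder.weight".split("."):
        pointer = getattr(pointer, attribute)
    print(pointer)  # "w"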
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == '''group''', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''')[-1] == name.split('''.''')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('''.''')[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    elif "running_mean" in name:
                        weight_type = '''running_mean'''
                    elif "inv_freq" in name:
                        weight_type = '''inv_freq'''
                    elif "running_var" in name:
                        weight_type = '''running_var'''
                    elif "num_batches_tracked" in name:
                        weight_type = '''num_batches_tracked'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F'''Unused weights: {unused_weights}''')
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq checkpoint's weights into the transformers design.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
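# Example invocation (paths are hypothetical placeholders; the script filename assumes the
# usual Hugging Face conversion-script naming):
#
#     python convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py \
#         --checkpoint_path /path/to/fairseq/checkpoint.pt \
#         --dict_path /path/to/dict.ltr.txt \
#         --pytorch_dump_folder_path ./wav2vec2-conformer-converted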
if __name__ == "__main__":
A_ : Any =argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
A_ : Union[str, Any] =parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 606
| 1
|
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 52
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 159
| 0
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}


class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
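# Minimal usage sketch (the checkpoint ids are the real repos listed in
# PRETRAINED_VOCAB_FILES_MAP above; everything else is the standard fast-tokenizer API):
#
#     from transformers import BloomTokenizerFast
#
#     tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#     batch = tokenizer(["Hello world"], return_tensors="pt")
#     # batch["input_ids"], batch["attention_mask"]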
| 712
|
"""simple docstring"""
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Return all primes below `limit`, via an odd-only sieve of Eratosthenes."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below `ceiling` expressible as the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol

    return largest
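# With the default ceiling of 1_000_000 this is Project Euler problem 50; the expected
# result is 997651, the sum of the 543 consecutive primes starting at 7.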
if __name__ == "__main__":
    print(f"{solution() = }")
| 192
| 0
|
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """Custom SentencePiece Unigram tokenizer with NMT, NFKC, whitespace and lower-casing normalization."""

    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: str = "<unk>",
        eos_token: str = "</s>",
        pad_token: str = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)

    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()

    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[List[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
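# Usage sketch (the corpus path is a placeholder):
#
#     tokenizer = SentencePieceUnigramTokenizer()
#     tokenizer.train(files="corpus.txt", vocab_size=8000)
#     encoding = tokenizer.encode("some text to tokenize")
#     # encoding.ids / encoding.tokens follow the `tokenizers` BaseTokenizer API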
| 213
|
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
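# Shape sanity check for the reordering above (an illustrative sketch, not part of the script):
# with num_heads=2, hidden_size=4, num_splits=3, a checkpoint-v2 QKV weight of shape [24, 8]
# is viewed as (2, 3, 4, 8), the heads and splits axes are swapped, and it is flattened back
# to the same [24, 8] shape with rows regrouped by split (q, k, v) first.
#
#     w = torch.arange(24 * 8, dtype=torch.float32).reshape(24, 8)
#     out = fix_query_key_value_ordering(w, 2.0, num_splits=3, num_heads=2, hidden_size=4)
#     assert out.shape == w.shape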
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))

        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint",
        type=str,
        help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file",
        default="",
        type=str,
        help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
| 213
| 1
|
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze(module):
    """Disable gradient updates for all parameters of `module`."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Pick the best available torch device, warning about flaky MPS support."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    """Display an image with matplotlib, hiding the axes."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    """Return the current time as an HH:MM:SS string."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
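# Usage sketch for the helpers above (the helper names were reconstructed descriptively):
#
#     device = get_device()
#     print(f"[{get_timestamp()}] running on {device}")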
| 595
|
"""simple docstring"""
from __future__ import annotations
solution: list[list[list[int]]] = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Check that no already-placed queen attacks the square (row, column)."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, backtracking when no column is safe."""
    if row >= len(board):
        # Store a copy so later backtracking doesn't erase the recorded solution.
        solution.append([line[:] for line in board])
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Print the board with Q for a queen and . for an empty square."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 595
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22ControlnetImg2ImgPipeline,
    KandinskyV22PriorEmb2EmbPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22ControlnetImg2ImgPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 8,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image_hint",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create hint
        hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "hint": hint,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_controlnet_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        init_image = init_image.resize((512, 512))

        hint = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/hint_image_cat.png"
        )
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        prompt = "A robot, 4k photo"

        pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)

        image_emb, zero_image_emb = pipe_prior(
            prompt,
            image=init_image,
            strength=0.85,
            generator=generator,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            height=512,
            width=512,
            strength=0.5,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 659
|
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {"tokenization_tapex": ["TapexTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_tapex import TapexTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 659
| 1
|
"""simple docstring"""
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        expected_outputs = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, expected_outputs)
| 709
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 158
| 0
|
"""simple docstring"""
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Test OpenAIGPTTokenizer with Spacy and ftfy."""

    pass
| 200
|
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class __snake_case :
"""simple docstring"""
UpperCamelCase_ = 42
UpperCamelCase_ = 42
UpperCamelCase_ = "longest"
UpperCamelCase_ = None
UpperCamelCase_ = None
def __call__( self : str ,lowerCAmelCase__ : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]:
'''simple docstring'''
lowerCAmelCase_ : Tuple = self.feature_extractor.pad(
lowerCAmelCase__ ,max_length=self.max_length ,padding=self.padding ,pad_to_multiple_of=self.pad_to_multiple_of ,return_tensors="pt" ,)
lowerCAmelCase_ : Union[str, Any] = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1] )
lowerCAmelCase_ : List[str] = batch["input_values"].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
lowerCAmelCase_ : Tuple = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1 ) ).to(
torch.long )
lowerCAmelCase_ : Optional[Any] = torch.zeros(
(batch_size, mask_indices_seq_length) ,dtype=torch.long ,device=batch["input_values"].device )
            # these two operations make sure that all values
# before the output lengths indices are attended to
lowerCAmelCase_ : Tuple = 1
lowerCAmelCase_ : int = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
lowerCAmelCase_ : str = _compute_mask_indices(
(batch_size, mask_indices_seq_length) ,self.model.config.mask_time_prob ,self.model.config.mask_time_length ,attention_mask=lowerCAmelCase__ ,min_masks=2 ,)
return batch
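def _attention_mask_demo() -> None:
    # Illustrative aside (not part of the original collator): the
    # flip/cumsum/flip trick above marks the last attended position of each
    # row with a 1, then a reversed cumulative sum fills in every position
    # before it, yielding a prefix attention mask. Toy values below.
    output_lengths = torch.tensor([3, 5])
    demo_mask = torch.zeros((2, 5), dtype=torch.long)
    demo_mask[(torch.arange(2), output_lengths - 1)] = 1
    demo_mask = demo_mask.flip([-1]).cumsum(-1).flip([-1]).bool()
    # demo_mask -> [[True, True, True, False, False],
    #               [True, True, True, True,  True ]]
    print(demo_mask)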
class __snake_case ( snake_case__ ):
"""simple docstring"""
def __init__( self : List[str] ,*lowerCAmelCase__ : Optional[int] ,lowerCAmelCase__ : Tuple=1 ,lowerCAmelCase__ : Optional[int]=0 ,lowerCAmelCase__ : Optional[Any]=1.0 ,**lowerCAmelCase__ : Any ) -> str:
'''simple docstring'''
super().__init__(*lowerCAmelCase__ ,**lowerCAmelCase__ )
lowerCAmelCase_ : Tuple = 0
lowerCAmelCase_ : int = max_gumbel_temp
lowerCAmelCase_ : Union[str, Any] = min_gumbel_temp
lowerCAmelCase_ : str = gumbel_temp_decay
def UpperCAmelCase_ ( self : Tuple ,lowerCAmelCase__ : nn.Module ,lowerCAmelCase__ : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor:
'''simple docstring'''
model.train()
lowerCAmelCase_ : str = self._prepare_inputs(lowerCAmelCase__ )
if self.use_amp:
with autocast():
lowerCAmelCase_ : List[Any] = self.compute_loss(lowerCAmelCase__ ,lowerCAmelCase__ )
else:
lowerCAmelCase_ : List[Any] = self.compute_loss(lowerCAmelCase__ ,lowerCAmelCase__ )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
lowerCAmelCase_ : List[Any] = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
lowerCAmelCase_ : Optional[Any] = loss.sum() / (inputs["mask_time_indices"]).sum()
else:
raise ValueError(f'''{model.config.ctc_loss_reduction} is not valid. Choose one of [\'mean\', \'sum\']''' )
if self.args.gradient_accumulation_steps > 1:
lowerCAmelCase_ : int = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(lowerCAmelCase__ ).backward()
elif self.use_apex:
with amp.scale_loss(lowerCAmelCase__ ,self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(lowerCAmelCase__ )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step ,self.min_gumbel_temp ) )
return loss.detach()
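def _gumbel_temperature_demo() -> None:
    # Illustrative aside (not part of the original trainer): the gumbel
    # temperature schedule above is an exponential decay clamped at a floor,
    # max(max_temp * decay**step, min_temp), shown here with the field
    # defaults declared earlier in this script.
    max_temp, min_temp, decay = 2.0, 0.5, 0.999995
    for step in (0, 100_000, 500_000):
        print(step, max(max_temp * decay**step, min_temp))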
def UpperCamelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCAmelCase_ : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Dict = parser.parse_args_into_dataclasses()
configure_logger(snake_case__ , snake_case__)
# Downloading and loading a dataset from the hub.
lowerCAmelCase_ : List[str] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir)
if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
lowerCAmelCase_ : Any = DatasetDict()
lowerCAmelCase_ : Union[str, Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[:{data_args.validation_split_percentage}%]''' , cache_dir=model_args.cache_dir , )
lowerCAmelCase_ : List[str] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}[{data_args.validation_split_percentage}%:]''' , cache_dir=model_args.cache_dir , )
else:
        # make sure only "validation" and "train" keys remain
lowerCAmelCase_ : Union[str, Any] = DatasetDict()
lowerCAmelCase_ : int = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split="validation" , cache_dir=model_args.cache_dir , )
lowerCAmelCase_ : Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''{data_args.train_split_name}''' , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
lowerCAmelCase_ : Dict = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=snake_case__)
def prepare_dataset(snake_case__):
# check that all files have the correct sampling rate
lowerCAmelCase_ , lowerCAmelCase_ : str = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate)
return batch
# load audio files into numpy arrays
lowerCAmelCase_ : int = datasets.map(
snake_case__ , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets["train"].column_names)
# filter audio files that are too long
lowerCAmelCase_ : int = vectorized_datasets.filter(
lambda snake_case__: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate))
def normalize(snake_case__):
return feature_extractor(batch["speech"] , sampling_rate=feature_extractor.sampling_rate)
# normalize and transform to `BatchFeatures`
lowerCAmelCase_ : str = vectorized_datasets.map(
snake_case__ , batched=snake_case__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets["train"].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
lowerCAmelCase_ : Optional[Any] = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
"PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
" ``config.feat_extract_norm='layer'")
lowerCAmelCase_ : Dict = WavaVecaForPreTraining(snake_case__)
lowerCAmelCase_ : int = DataCollatorForWavaVecaPretraining(model=snake_case__ , feature_extractor=snake_case__)
lowerCAmelCase_ : List[Any] = WavaVecaPreTrainer(
model=snake_case__ , data_collator=snake_case__ , args=snake_case__ , train_dataset=vectorized_datasets["train"] , eval_dataset=vectorized_datasets["validation"] , tokenizer=snake_case__ , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 659
| 0
|
'''simple docstring'''
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
    " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
    """Download up to `max_images` full-resolution images for a Google image query."""
    max_images = min(max_images, 50)  # Prevent abuse!
    params = {
        "q": query,
        "tbm": "isch",
        "hl": "en",
        "ijn": "0",
    }
    html = requests.get("https://www.google.com/search", params=params, headers=headers)
    soup = BeautifulSoup(html.text, "html.parser")
    matched_images_data = "".join(
        re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
    )
    matched_images_data_fix = json.dumps(matched_images_data)
    matched_images_data_json = json.loads(matched_images_data_fix)
    matched_google_image_data = re.findall(
        r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
        matched_images_data_json,
    )
    if not matched_google_image_data:
        return 0
    removed_matched_google_images_thumbnails = re.sub(
        r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
        "",
        str(matched_google_image_data),
    )
    matched_google_full_resolution_images = re.findall(
        r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
        removed_matched_google_images_thumbnails,
    )
    for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
        if index >= max_images:
            return index
        original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
            "unicode-escape"
        )
        original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
            "unicode-escape"
        )
        opener = urllib.request.build_opener()
        opener.addheaders = [
            (
                "User-Agent",
                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
                " (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
            )
        ]
        urllib.request.install_opener(opener)
        path_name = f"query_{query.replace(' ', '_')}"
        if not os.path.exists(path_name):
            os.makedirs(path_name)
        urllib.request.urlretrieve(  # noqa: S310
            original_size_img, f"{path_name}/original_size_img_{index}.jpg"
        )
    return index
if __name__ == "__main__":
try:
        image_count = download_images_from_google_query(sys.argv[1])
print(f"""{image_count} images were downloaded to disk.""")
except IndexError:
print('Please provide a search term.')
raise
| 709
|
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2
def coulombs_law(
    force: float, charge1: float, charge2: float, distance: float
) -> dict[str, float]:
    """Solve for the zero-valued quantity in Coulomb's law, F = k * q1 * q2 / d^2."""
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
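    # Usage sketch (illustrative): pass 0 for exactly one argument to solve for it.
    print(coulombs_law(0, 3, 5, 2000))  # -> {'force': 33705.0}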
| 608
| 0
|
"""simple docstring"""
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
__magic_name__ = "src/transformers"
__magic_name__ = "docs/source/en"
__magic_name__ = "."
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ ) -> Dict:
'''simple docstring'''
with open(UpperCAmelCase__,'r',encoding='utf-8',newline='\n' ) as f:
a__ = f.readlines()
# Find the start prompt.
a__ = 0
while not lines[start_index].startswith(UpperCAmelCase__ ):
start_index += 1
start_index += 1
a__ = start_index
while not lines[end_index].startswith(UpperCAmelCase__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
__magic_name__ = "Model|Encoder|Decoder|ForConditionalGeneration"
# Regexes that match TF/Flax/PT model names.
__magic_name__ = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
__magic_name__ = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
__magic_name__ = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
__magic_name__ = direct_transformers_import(TRANSFORMERS_PATH)
def _lowerCamelCase ( UpperCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
a__ = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)',UpperCAmelCase__ )
return [m.group(0 ) for m in matches]
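# e.g. camel_case_split("TFEncoderDecoderModel") -> ["TF", "Encoder", "Decoder", "Model"] (illustrative)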
def _lowerCamelCase ( UpperCAmelCase__,UpperCAmelCase__ ) -> List[str]:
'''simple docstring'''
a__ = 2 if text == '✅' or text == '❌' else len(UpperCAmelCase__ )
a__ = (width - text_length) // 2
a__ = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def _lowerCamelCase ( ) -> Any:
'''simple docstring'''
a__ = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
a__ = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
a__ = {name: config.replace('Config','' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
a__ = collections.defaultdict(UpperCAmelCase__ )
a__ = collections.defaultdict(UpperCAmelCase__ )
a__ = collections.defaultdict(UpperCAmelCase__ )
a__ = collections.defaultdict(UpperCAmelCase__ )
a__ = collections.defaultdict(UpperCAmelCase__ )
# Let's lookup through all transformers object (once).
for attr_name in dir(UpperCAmelCase__ ):
a__ = None
if attr_name.endswith('Tokenizer' ):
a__ = slow_tokenizers
a__ = attr_name[:-9]
elif attr_name.endswith('TokenizerFast' ):
a__ = fast_tokenizers
a__ = attr_name[:-13]
elif _re_tf_models.match(UpperCAmelCase__ ) is not None:
a__ = tf_models
a__ = _re_tf_models.match(UpperCAmelCase__ ).groups()[0]
elif _re_flax_models.match(UpperCAmelCase__ ) is not None:
a__ = flax_models
a__ = _re_flax_models.match(UpperCAmelCase__ ).groups()[0]
elif _re_pt_models.match(UpperCAmelCase__ ) is not None:
a__ = pt_models
a__ = _re_pt_models.match(UpperCAmelCase__ ).groups()[0]
if lookup_dict is not None:
while len(UpperCAmelCase__ ) > 0:
if attr_name in model_name_to_prefix.values():
a__ = True
break
# Try again after removing the last word in the name
a__ = ''.join(camel_case_split(UpperCAmelCase__ )[:-1] )
# Let's build that table!
a__ = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
a__ = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
a__ = [len(UpperCAmelCase__ ) + 2 for c in columns]
a__ = max([len(UpperCAmelCase__ ) for name in model_names] ) + 2
# Build the table per se
a__ = '|' + '|'.join([_center_text(UpperCAmelCase__,UpperCAmelCase__ ) for c, w in zip(UpperCAmelCase__,UpperCAmelCase__ )] ) + '|\n'
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths] ) + "|\n"
a__ = {True: '✅', False: '❌'}
for name in model_names:
a__ = model_name_to_prefix[name]
a__ = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(UpperCAmelCase__,UpperCAmelCase__ ) for l, w in zip(UpperCAmelCase__,UpperCAmelCase__ )] ) + "|\n"
return table
def _lowerCamelCase ( UpperCAmelCase__=False ) -> Dict:
'''simple docstring'''
a__ , a__ , a__ , a__ = _find_text_in_file(
filename=os.path.join(UpperCAmelCase__,'index.md' ),start_prompt='<!--This table is updated automatically from the auto modules',end_prompt='<!-- End table-->',)
a__ = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(UpperCAmelCase__,'index.md' ),'w',encoding='utf-8',newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.' )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__magic_name__ = parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 232
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
"configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
"tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"ElectraForCausalLM",
"ElectraForMaskedLM",
"ElectraForMultipleChoice",
"ElectraForPreTraining",
"ElectraForQuestionAnswering",
"ElectraForSequenceClassification",
"ElectraForTokenClassification",
"ElectraModel",
"ElectraPreTrainedModel",
"load_tf_weights_in_electra",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFElectraForMaskedLM",
"TFElectraForMultipleChoice",
"TFElectraForPreTraining",
"TFElectraForQuestionAnswering",
"TFElectraForSequenceClassification",
"TFElectraForTokenClassification",
"TFElectraModel",
"TFElectraPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"FlaxElectraForCausalLM",
"FlaxElectraForMaskedLM",
"FlaxElectraForMultipleChoice",
"FlaxElectraForPreTraining",
"FlaxElectraForQuestionAnswering",
"FlaxElectraForSequenceClassification",
"FlaxElectraForTokenClassification",
"FlaxElectraModel",
"FlaxElectraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 232
| 1
|
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __A (__magic_name__ ):
snake_case :int = (DDIMParallelScheduler,)
snake_case :Tuple = (("eta", 0.0), ("num_inference_steps", 50))
def _snake_case ( self , **UpperCamelCase_ ):
__UpperCAmelCase : Dict = {
"num_train_timesteps": 10_00,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**UpperCamelCase_ )
return config
def _snake_case ( self , **UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0]
__UpperCAmelCase : Optional[Any] = self.get_scheduler_config(**UpperCamelCase_ )
__UpperCAmelCase : int = scheduler_class(**UpperCamelCase_ )
__UpperCAmelCase , __UpperCAmelCase : Any = 10, 0.0
__UpperCAmelCase : List[Any] = self.dummy_model()
__UpperCAmelCase : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase_ )
for t in scheduler.timesteps:
__UpperCAmelCase : Tuple = model(UpperCamelCase_ , UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ).prev_sample
return sample
def _snake_case ( self ):
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=UpperCamelCase_ )
def _snake_case ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=UpperCamelCase_ )
__UpperCAmelCase : List[str] = self.scheduler_classes[0]
__UpperCAmelCase : Optional[Any] = self.get_scheduler_config(steps_offset=1 )
__UpperCAmelCase : List[Any] = scheduler_class(**UpperCamelCase_ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
def _snake_case ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=UpperCamelCase_ , beta_end=UpperCamelCase_ )
def _snake_case ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=UpperCamelCase_ )
def _snake_case ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase_ )
def _snake_case ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase_ )
def _snake_case ( self ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=UpperCamelCase_ )
def _snake_case ( self ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=UpperCamelCase_ )
def _snake_case ( self ):
self.check_over_configs(thresholding=UpperCamelCase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=UpperCamelCase_ , prediction_type=UpperCamelCase_ , sample_max_value=UpperCamelCase_ , )
def _snake_case ( self ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=UpperCamelCase_ )
def _snake_case ( self ):
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
self.check_over_forward(time_step=UpperCamelCase_ , num_inference_steps=UpperCamelCase_ )
def _snake_case ( self ):
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=UpperCamelCase_ , eta=UpperCamelCase_ )
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = self.scheduler_classes[0]
__UpperCAmelCase : Dict = self.get_scheduler_config()
__UpperCAmelCase : Optional[Any] = scheduler_class(**UpperCamelCase_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.1_4_7_7_1 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.3_2_4_6_0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.0_2 ) ) < 1E-5
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0]
__UpperCAmelCase : Optional[int] = self.get_scheduler_config()
__UpperCAmelCase : List[str] = scheduler_class(**UpperCamelCase_ )
__UpperCAmelCase , __UpperCAmelCase : int = 10, 0.0
scheduler.set_timesteps(UpperCamelCase_ )
__UpperCAmelCase : Any = self.dummy_model()
__UpperCAmelCase : str = self.dummy_sample_deter
__UpperCAmelCase : Optional[Any] = self.dummy_sample_deter + 0.1
__UpperCAmelCase : List[str] = self.dummy_sample_deter - 0.1
__UpperCAmelCase : str = samplea.shape[0]
__UpperCAmelCase : Tuple = torch.stack([samplea, samplea, samplea] , dim=0 )
__UpperCAmelCase : List[Any] = torch.arange(UpperCamelCase_ )[0:3, None].repeat(1 , UpperCamelCase_ )
__UpperCAmelCase : List[str] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__UpperCAmelCase : Tuple = scheduler.batch_step_no_noise(UpperCamelCase_ , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , UpperCamelCase_ )
__UpperCAmelCase : int = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : Union[str, Any] = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 1_1_4_7.7_9_0_4 ) < 1E-2
assert abs(result_mean.item() - 0.4_9_8_2 ) < 1E-3
def _snake_case ( self ):
__UpperCAmelCase : str = self.full_loop()
__UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : List[Any] = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 1_7_2.0_0_6_7 ) < 1E-2
assert abs(result_mean.item() - 0.2_2_3_9_6_7 ) < 1E-3
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = self.full_loop(prediction_type="v_prediction" )
__UpperCAmelCase : Any = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : List[Any] = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 5_2.5_3_0_2 ) < 1E-2
assert abs(result_mean.item() - 0.0_6_8_4 ) < 1E-3
def _snake_case ( self ):
# We specify different beta, so that the first alpha is 0.99
__UpperCAmelCase : List[Any] = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.0_1 )
__UpperCAmelCase : List[str] = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : int = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 1_4_9.8_2_9_5 ) < 1E-2
assert abs(result_mean.item() - 0.1_9_5_1 ) < 1E-3
def _snake_case ( self ):
# We specify different beta, so that the first alpha is 0.99
__UpperCAmelCase : Optional[int] = self.full_loop(set_alpha_to_one=UpperCamelCase_ , beta_start=0.0_1 )
__UpperCAmelCase : Tuple = torch.sum(torch.abs(UpperCamelCase_ ) )
__UpperCAmelCase : Dict = torch.mean(torch.abs(UpperCamelCase_ ) )
assert abs(result_sum.item() - 1_4_9.0_7_8_4 ) < 1E-2
assert abs(result_mean.item() - 0.1_9_4_1 ) < 1E-3
| 10
|
'''simple docstring'''
def method_1(boundary: list, steps: float) -> float:
    # Composite trapezoidal rule: h/2 * (f(a) + 2 * sum(f(x_i)) + f(b))
    # with h = (b - a) / steps.
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y
def make_points(a: float, b: float, h: float):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h
def f(x: float) -> float:  # enter your function here
    y = (x - 0) * (x - 0)
    return y
def main() -> None:
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
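    # Illustrative check (not in the original): f(x) = x**2 integrates to 1/3
    # on [0, 1], and the trapezoidal estimate tightens as `steps` grows.
    for n in (10.0, 100.0, 1000.0):
        print(n, method_1([0.0, 1.0], n))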
| 10
| 1
|
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def base64_encode(data: bytes) -> bytes:
    """Encode `data` to base64, mirroring base64.b64encode."""
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)
    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)
    padding_needed = len(binary_stream) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    """Decode base64-encoded `encoded_data`, mirroring base64.b64decode."""
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")
    padding = encoded_data.count("=")
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )
    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]
    return bytes(decoded_data)
if __name__ == "__main__":
import doctest
doctest.testmod()
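    # Round-trip sketch (illustrative): encoding then decoding returns the input.
    message = b"Hello, base64!"
    encoded = base64_encode(message)  # matches base64.b64encode(message)
    assert base64_decode(encoded) == message
    print(encoded)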
| 693
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
lowercase : int = [
'''EAGER''',
'''AOT_EAGER''',
'''INDUCTOR''',
'''NVFUSER''',
'''AOT_NVFUSER''',
'''AOT_CUDAGRAPHS''',
'''OFI''',
'''FX2TRT''',
'''ONNXRT''',
'''IPEX''',
]
def lowerCAmelCase__ ( _a : Dict , _a : Optional[int]=None , _a : Union[str, Any]=None , _a : List[Any]=None ):
snake_case_ : Optional[Any] = True
while ask_again:
snake_case_ : str = input(_a )
try:
if default is not None and len(_a ) == 0:
return default
return convert_value(_a ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_a )
def lowerCAmelCase__ ( _a : Dict , _a : str=[] , _a : Union[str, Any]=None , _a : Optional[int]=0 ):
snake_case_ : List[Any] = BulletMenu(_a , _a )
snake_case_ : Union[str, Any] = menu.run(default_choice=_a )
return convert_value(_a ) if convert_value is not None else result
def lowerCAmelCase__ ( _a : List[str] ):
snake_case_ : str = int(_a )
return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value] )
def lowerCAmelCase__ ( _a : int ):
snake_case_ : str = int(_a )
return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value] )
def lowerCAmelCase__ ( _a : Optional[Any] ):
snake_case_ : int = int(_a )
return DynamoBackend(DYNAMO_BACKENDS[value] ).value
def lowerCAmelCase__ ( _a : Dict ):
snake_case_ : str = int(_a )
return PrecisionType(["no", "fp16", "bf16", "fp8"][value] )
def lowerCAmelCase__ ( _a : int ):
snake_case_ : Optional[int] = int(_a )
return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value] )
def lowerCAmelCase__ ( _a : str ):
return {"yes": True, "no": False}[value.lower()]
class UpperCAmelCase_ ( argparse.RawDescriptionHelpFormatter ):
'''simple docstring'''
def _lowerCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
snake_case_ : Tuple = super()._format_usage(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ : Any = usage.replace("<command> [<args>] " , "" )
return usage
| 568
| 0
|
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
__UpperCAmelCase = logging.getLogger(__name__)
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ ="sequence-classification"
def __init__( self , _A ) -> Optional[Any]:
if type(_A ) == dict:
SCREAMING_SNAKE_CASE_ = Namespace(**_A )
SCREAMING_SNAKE_CASE_ = glue_output_modes[hparams.task]
SCREAMING_SNAKE_CASE_ = glue_tasks_num_labels[hparams.task]
super().__init__(_A , _A , self.mode )
def _UpperCamelCase ( self , **_A ) -> Optional[Any]:
return self.model(**_A )
def _UpperCamelCase ( self , _A , _A ) -> int:
SCREAMING_SNAKE_CASE_ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
SCREAMING_SNAKE_CASE_ = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
SCREAMING_SNAKE_CASE_ = self(**_A )
SCREAMING_SNAKE_CASE_ = outputs[0]
SCREAMING_SNAKE_CASE_ = self.trainer.lr_schedulers[0]['''scheduler''']
SCREAMING_SNAKE_CASE_ = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = self.hparams
SCREAMING_SNAKE_CASE_ = processors[args.task]()
SCREAMING_SNAKE_CASE_ = processor.get_labels()
for mode in ["train", "dev"]:
SCREAMING_SNAKE_CASE_ = self._feature_file(_A )
if os.path.exists(_A ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , _A )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
SCREAMING_SNAKE_CASE_ = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
SCREAMING_SNAKE_CASE_ = convert_examples_to_features(
_A , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('''Saving features into cached file %s''' , _A )
torch.save(_A , _A )
def _UpperCamelCase ( self , _A , _A , _A = False ) -> DataLoader:
SCREAMING_SNAKE_CASE_ = '''dev''' if mode == '''test''' else mode
SCREAMING_SNAKE_CASE_ = self._feature_file(_A )
logger.info('''Loading features from cached file %s''' , _A )
SCREAMING_SNAKE_CASE_ = torch.load(_A )
SCREAMING_SNAKE_CASE_ = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
SCREAMING_SNAKE_CASE_ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
SCREAMING_SNAKE_CASE_ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
SCREAMING_SNAKE_CASE_ = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
SCREAMING_SNAKE_CASE_ = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(_A , _A , _A , _A ) , batch_size=_A , shuffle=_A , )
def _UpperCamelCase ( self , _A , _A ) -> List[Any]:
SCREAMING_SNAKE_CASE_ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
SCREAMING_SNAKE_CASE_ = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
SCREAMING_SNAKE_CASE_ = self(**_A )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = outputs[:2]
SCREAMING_SNAKE_CASE_ = logits.detach().cpu().numpy()
SCREAMING_SNAKE_CASE_ = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _UpperCamelCase ( self , _A ) -> tuple:
SCREAMING_SNAKE_CASE_ = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
SCREAMING_SNAKE_CASE_ = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
SCREAMING_SNAKE_CASE_ = np.argmax(_A , axis=1 )
elif self.hparams.glue_output_mode == "regression":
SCREAMING_SNAKE_CASE_ = np.squeeze(_A )
SCREAMING_SNAKE_CASE_ = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
SCREAMING_SNAKE_CASE_ = [[] for _ in range(out_label_ids.shape[0] )]
SCREAMING_SNAKE_CASE_ = [[] for _ in range(out_label_ids.shape[0] )]
SCREAMING_SNAKE_CASE_ = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , _A , _A )}
SCREAMING_SNAKE_CASE_ = dict(results.items() )
SCREAMING_SNAKE_CASE_ = results
return ret, preds_list, out_label_list
def _UpperCamelCase ( self , _A ) -> dict:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._eval_end(_A )
SCREAMING_SNAKE_CASE_ = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _UpperCamelCase ( self , _A ) -> dict:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self._eval_end(_A )
SCREAMING_SNAKE_CASE_ = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _UpperCamelCase ( _A , _A ) -> str:
BaseTransformer.add_model_specific_args(_A , _A )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=_A , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--task''' , default='''''' , type=_A , required=_A , help='''The GLUE task to run''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=_A , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
def A__ ( ):
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
add_generic_args(__lowerCamelCase, os.getcwd() )
SCREAMING_SNAKE_CASE_ = GLUETransformer.add_model_specific_args(__lowerCamelCase, os.getcwd() )
SCREAMING_SNAKE_CASE_ = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
SCREAMING_SNAKE_CASE_ = os.path.join(
'''./results''', F'''{args.task}_{time.strftime("%Y%m%d_%H%M%S" )}''', )
os.makedirs(args.output_dir )
SCREAMING_SNAKE_CASE_ = GLUETransformer(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = generic_train(__lowerCamelCase, __lowerCamelCase )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
SCREAMING_SNAKE_CASE_ = sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt''' ), recursive=__lowerCamelCase ) )
SCREAMING_SNAKE_CASE_ = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(__lowerCamelCase )
if __name__ == "__main__":
main()
| 597
|
def is_sum_subset(arr: list, required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True
    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
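    # Usage sketch (illustrative): 2 + 4 + 8 == 14, but no subset sums to 5.
    print(is_sum_subset([2, 4, 6, 8], 5))  # False
    print(is_sum_subset([2, 4, 6, 8], 14))  # True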
| 597
| 1
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_snake_case : Union[str, Any] = logging.get_logger(__name__)
class a (_lowerCAmelCase ):
"""simple docstring"""
def __init__( self : Optional[int] , *lowerCamelCase : str , **lowerCamelCase : Any ) -> None:
warnings.warn(
"The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use MobileViTImageProcessor instead." , lowerCamelCase , )
super().__init__(*lowerCamelCase , **lowerCamelCase )
| 81
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase__ ( a , a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = IFInpaintingPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCAmelCase__ = PipelineTesterMixin.required_optional_params - {"latents"}
def UpperCAmelCase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
return self._get_dummy_components()
def UpperCAmelCase__ ( self : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict=0 ) -> Any:
"""simple docstring"""
if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
__SCREAMING_SNAKE_CASE = torch.manual_seed(__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = floats_tensor((1, 3, 32, 32) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCAmelCase__ ( self : str ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def UpperCAmelCase__ ( self : str ) -> List[str]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def UpperCAmelCase__ ( self : Dict ) -> int:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def UpperCAmelCase__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def UpperCAmelCase__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
self._test_save_load_local()
def UpperCAmelCase__ ( self : Any ) -> List[Any]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 627
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
_A : Dict = """vivit"""
def __init__( self: int , snake_case: str=224 , snake_case: Tuple=32 , snake_case: List[str]=[2, 16, 16] , snake_case: Optional[int]=3 , snake_case: int=768 , snake_case: Optional[Any]=12 , snake_case: str=12 , snake_case: Any=3_072 , snake_case: int="gelu_fast" , snake_case: Optional[Any]=0.0 , snake_case: List[str]=0.0 , snake_case: Dict=0.0_2 , snake_case: Optional[Any]=1E-06 , snake_case: List[str]=True , **snake_case: Union[str, Any] , ) -> Union[str, Any]:
snake_case_ :Dict = hidden_size
snake_case_ :Union[str, Any] = num_hidden_layers
snake_case_ :List[Any] = num_attention_heads
snake_case_ :str = intermediate_size
snake_case_ :Optional[Any] = hidden_act
snake_case_ :int = hidden_dropout_prob
snake_case_ :str = attention_probs_dropout_prob
snake_case_ :str = initializer_range
snake_case_ :int = layer_norm_eps
snake_case_ :int = image_size
snake_case_ :Tuple = num_frames
snake_case_ :str = tubelet_size
snake_case_ :Union[str, Any] = num_channels
snake_case_ :Optional[Any] = qkv_bias
super().__init__(**snake_case )
| 310
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__a = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ["ConditionalDetrFeatureExtractor"]
__a = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 310
| 1
|
"""simple docstring"""
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def snake_case ( _a: str , _a: str )-> List[Any]:
'''simple docstring'''
lowerCamelCase__ = old_name
if "patch_embed" in old_name:
lowerCamelCase__ = old_name.split('.' )
if layer == "0":
lowerCamelCase__ = old_name.replace('0' , 'convolution1' )
elif layer == "1":
lowerCamelCase__ = old_name.replace('1' , 'batchnorm_before' )
elif layer == "3":
lowerCamelCase__ = old_name.replace('3' , 'convolution2' )
else:
lowerCamelCase__ = old_name.replace('4' , 'batchnorm_after' )
if "network" in old_name and re.search(R'\d\.\d' , _a ):
lowerCamelCase__ = R'''\b\d{2}\b'''
if bool(re.search(_a , _a ) ):
lowerCamelCase__ = re.search(R'\d\.\d\d.' , _a ).group()
else:
lowerCamelCase__ = re.search(R'\d\.\d.' , _a ).group()
if int(match[0] ) < 6:
lowerCamelCase__ = old_name.replace(_a , '' )
lowerCamelCase__ = trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] )
lowerCamelCase__ = '''intermediate_stages.''' + trimmed_name
else:
lowerCamelCase__ = old_name.replace(_a , '' )
if int(match[2] ) < num_meta4D_last_stage:
lowerCamelCase__ = trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] )
else:
lowerCamelCase__ = str(int(match[2] ) - num_meta4D_last_stage )
lowerCamelCase__ = trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index )
if "norm1" in old_name:
lowerCamelCase__ = trimmed_name.replace('norm1' , 'layernorm1' )
elif "norm2" in old_name:
lowerCamelCase__ = trimmed_name.replace('norm2' , 'layernorm2' )
elif "fc1" in old_name:
lowerCamelCase__ = trimmed_name.replace('fc1' , 'linear_in' )
elif "fc2" in old_name:
lowerCamelCase__ = trimmed_name.replace('fc2' , 'linear_out' )
lowerCamelCase__ = '''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(R'.\d.' , _a ):
lowerCamelCase__ = old_name.replace('network' , 'intermediate_stages' )
if "fc" in new_name:
lowerCamelCase__ = new_name.replace('fc' , 'convolution' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
lowerCamelCase__ = new_name.replace('norm1' , 'batchnorm_before' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
lowerCamelCase__ = new_name.replace('norm2' , 'batchnorm_after' )
if "proj" in new_name:
lowerCamelCase__ = new_name.replace('proj' , 'projection' )
if "dist_head" in new_name:
lowerCamelCase__ = new_name.replace('dist_head' , 'distillation_classifier' )
elif "head" in new_name:
lowerCamelCase__ = new_name.replace('head' , 'classifier' )
elif "patch_embed" in new_name:
lowerCamelCase__ = '''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
lowerCamelCase__ = new_name.replace('norm' , 'layernorm' )
lowerCamelCase__ = '''efficientformer.''' + new_name
else:
lowerCamelCase__ = '''efficientformer.encoder.''' + new_name
return new_name
def snake_case ( _a: Tuple , _a: Optional[int] )-> Tuple:
'''simple docstring'''
for key in checkpoint.copy().keys():
lowerCamelCase__ = checkpoint.pop(_a )
lowerCamelCase__ = val
return checkpoint
def snake_case ( )-> Dict:
'''simple docstring'''
lowerCamelCase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase__ = Image.open(requests.get(_a , stream=_a ).raw )
return image
def snake_case ( _a: Path , _a: Path , _a: Path , _a: bool )-> List[str]:
'''simple docstring'''
lowerCamelCase__ = torch.load(_a , map_location='cpu' )['''model''']
lowerCamelCase__ = EfficientFormerConfig.from_json_file(_a )
lowerCamelCase__ = EfficientFormerForImageClassificationWithTeacher(_a )
lowerCamelCase__ = '''_'''.join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] )
lowerCamelCase__ = config.depths[-1] - config.num_metaad_blocks + 1
lowerCamelCase__ = convert_torch_checkpoint(_a , _a )
model.load_state_dict(_a )
model.eval()
lowerCamelCase__ = {
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = 256
lowerCamelCase__ = 224
lowerCamelCase__ = EfficientFormerImageProcessor(
size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , )
lowerCamelCase__ = processor(images=_a , return_tensors='pt' ).pixel_values
# original processing pipeline
lowerCamelCase__ = Compose(
[
Resize(_a , interpolation=pillow_resamplings['bicubic'] ),
CenterCrop(_a ),
ToTensor(),
Normalize(_a , _a ),
] )
lowerCamelCase__ = image_transforms(_a ).unsqueeze(0 )
assert torch.allclose(_a , _a )
lowerCamelCase__ = model(_a )
lowerCamelCase__ = outputs.logits
lowerCamelCase__ = (1, 1000)
if "l1" in model_name:
lowerCamelCase__ = torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , _a , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
lowerCamelCase__ = torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , _a , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
lowerCamelCase__ = torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
F'Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7' )
# Save Checkpoints
Path(_a ).mkdir(exist_ok=_a )
model.save_pretrained(_a )
    print(F'Checkpoint successfully converted. Model saved at {pytorch_dump_path}' )
processor.save_pretrained(_a )
    print(F'Processor successfully saved at {pytorch_dump_path}' )
if push_to_hub:
print('Pushing model to the hub...' )
model.push_to_hub(
repo_id=F'Bearnardd/{pytorch_dump_path}' , commit_message='Add model' , use_temp_dir=_a , )
processor.push_to_hub(
repo_id=F'Bearnardd/{pytorch_dump_path}' , commit_message='Add image processor' , use_temp_dir=_a , )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
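# Illustrative invocation (the script and file names below are placeholders, not
# shipped artifacts):
#
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path efficientformer_l1_300d.pth \
#       --config_file efficientformer_l1_config.json \
#       --pytorch_dump_path efficientformer-l1-300 \
#       --no-push_to_hub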
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 3_00  # TEMPERATURE (unit = K)


def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float, ):
'''simple docstring'''
if donor_conc <= 0:
raise ValueError('''Donor concentration should be positive''' )
elif acceptor_conc <= 0:
raise ValueError('''Acceptor concentration should be positive''' )
elif intrinsic_conc <= 0:
raise ValueError('''Intrinsic concentration should be positive''' )
elif donor_conc <= intrinsic_conc:
raise ValueError(
'''Donor concentration should be greater than intrinsic concentration''' )
elif acceptor_conc <= intrinsic_conc:
raise ValueError(
'''Acceptor concentration should be greater than intrinsic concentration''' )
else:
return (
Boltzmann
* T
* log((donor_conc * acceptor_conc) / intrinsic_conc**2 )
/ physical_constants["electron volt"][0]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
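# Illustrative check (typical silicon values, concentrations in cm^-3):
# builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10)
# returns roughly 0.81 (volts) at T = 300 K.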
from __future__ import annotations
def peak(lst: list[int]) -> int:
    '''simple docstring'''
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])
if __name__ == "__main__":
import doctest
doctest.testmod()
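# Illustrative call (assumes the list first strictly rises and then strictly falls):
# peak([1, 2, 3, 4, 5, 4, 3, 2, 1]) returns 5.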
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__snake_case : List[Any] = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class ErnieMConfig(PretrainedConfig):
    """simple docstring"""

    model_type: str = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__( self , vocab_size: int = 250002 , hidden_size: int = 768 , num_hidden_layers: int = 12 , num_attention_heads: int = 12 , intermediate_size: int = 3072 , hidden_act: str = "gelu" , hidden_dropout_prob: float = 0.1 , attention_probs_dropout_prob: float = 0.1 , max_position_embeddings: int = 514 , initializer_range: float = 0.02 , pad_token_id: int = 1 , layer_norm_eps: float = 1E-05 , classifier_dropout=None , is_decoder=False , act_dropout=0.0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
"""simple docstring"""
def climb_stairs(number_of_steps: int) -> int:
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f'''number_of_steps needs to be positive integer, your input {number_of_steps}'''
    if number_of_steps == 1:
        return 1
    current, previous = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current
if __name__ == "__main__":
import doctest
doctest.testmod()
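# Illustrative call: climb_stairs(4) returns 5, the number of distinct ways to
# climb four steps taking one or two steps at a time.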
"""simple docstring"""
def binary_or(a: int, b: int) -> str:
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int('1' in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
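# Illustrative call: binary_or(25, 32) returns '0b111001',
# i.e. the bitwise OR of 0b11001 and 0b100000.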
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class _lowerCamelCase (__lowerCamelCase ):
_snake_case = ["input_features", "attention_mask"]
def __init__( self : int , lowerCamelCase_ : List[str]=8_0 , lowerCamelCase_ : Tuple=1_6_0_0_0 , lowerCamelCase_ : Union[str, Any]=0.0 , lowerCamelCase_ : List[Any]=1_0 , lowerCamelCase_ : List[str]=2_5 , lowerCamelCase_ : List[Any]="hamming_window" , lowerCamelCase_ : Tuple=3_2768.0 , lowerCamelCase_ : int=0.97 , lowerCamelCase_ : Optional[int]=1.0 , lowerCamelCase_ : Optional[Any]=True , lowerCamelCase_ : Union[str, Any]=True , lowerCamelCase_ : Optional[int]=False , **lowerCamelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(feature_size=lowerCamelCase_ , sampling_rate=lowerCamelCase_ , padding_value=lowerCamelCase_ , **lowerCamelCase_ )
_lowercase : Dict = feature_size
_lowercase : Dict = sampling_rate
_lowercase : Tuple = padding_value
_lowercase : int = hop_length
_lowercase : Any = win_length
_lowercase : Union[str, Any] = frame_signal_scale
_lowercase : Tuple = preemphasis_coeff
_lowercase : Tuple = mel_floor
_lowercase : Tuple = normalize_means
_lowercase : List[Any] = normalize_vars
_lowercase : List[str] = win_function
_lowercase : int = return_attention_mask
_lowercase : Optional[Any] = win_length * sampling_rate // 1_0_0_0
_lowercase : Tuple = hop_length * sampling_rate // 1_0_0_0
_lowercase : str = optimal_fft_length(self.sample_size )
_lowercase : Dict = (self.n_fft // 2) + 1
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : np.array ):
"""simple docstring"""
if self.win_function == "hamming_window":
_lowercase : List[Any] = window_function(window_length=self.sample_size , name=self.win_function , periodic=lowerCamelCase_ )
else:
_lowercase : Union[str, Any] = window_function(window_length=self.sample_size , name=self.win_function )
_lowercase : Tuple = mel_filter_bank(
num_frequency_bins=self.n_freqs , num_mel_filters=self.feature_size , min_frequency=0.0 , max_frequency=self.sampling_rate / 2.0 , sampling_rate=self.sampling_rate , )
_lowercase : Tuple = spectrogram(
one_waveform * self.frame_signal_scale , window=lowerCamelCase_ , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , center=lowerCamelCase_ , preemphasis=self.preemphasis_coeff , mel_filters=lowerCamelCase_ , mel_floor=self.mel_floor , log_mel='log' , )
return msfc_features.T
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Tuple ):
"""simple docstring"""
if self.normalize_means:
_lowercase : Optional[int] = x[:input_length].mean(axis=0 )
_lowercase : int = np.subtract(lowerCamelCase_ , lowerCamelCase_ )
if self.normalize_vars:
_lowercase : int = x[:input_length].std(axis=0 )
_lowercase : Optional[Any] = np.divide(lowerCamelCase_ , lowerCamelCase_ )
if input_length < x.shape[0]:
_lowercase : Dict = padding_value
# make sure array is in float32
_lowercase : Tuple = x.astype(np.floataa )
return x
def __UpperCAmelCase ( self : List[Any] , lowerCamelCase_ : List[np.ndarray] , lowerCamelCase_ : Optional[np.ndarray] = None ):
"""simple docstring"""
_lowercase : Dict = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(lowerCamelCase_ , lowerCamelCase_ , self.padding_value ) for x, n in zip(lowerCamelCase_ , lowerCamelCase_ )]
def __call__( self : Dict , lowerCamelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowerCamelCase_ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : bool = False , lowerCamelCase_ : Optional[int] = None , lowerCamelCase_ : Optional[bool] = None , lowerCamelCase_ : Optional[Union[str, TensorType]] = None , lowerCamelCase_ : Optional[int] = None , **lowerCamelCase_ : Optional[int] , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
F''' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'''
F''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
_lowercase : Optional[Any] = isinstance(lowerCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' )
_lowercase : Optional[int] = is_batched_numpy or (
isinstance(lowerCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_lowercase : str = [np.asarray(lowerCamelCase_ , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(lowerCamelCase_ , np.ndarray ):
_lowercase : Tuple = np.asarray(lowerCamelCase_ , dtype=np.floataa )
elif isinstance(lowerCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_lowercase : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_lowercase : str = [raw_speech]
# extract fbank features
_lowercase : Optional[Any] = [self._extract_mfsc_features(lowerCamelCase_ ) for one_waveform in raw_speech]
# convert into correct format for padding
_lowercase : Optional[int] = BatchFeature({'input_features': features} )
_lowercase : Tuple = self.pad(
lowerCamelCase_ , padding=lowerCamelCase_ , max_length=lowerCamelCase_ , truncation=lowerCamelCase_ , pad_to_multiple_of=lowerCamelCase_ , return_attention_mask=lowerCamelCase_ , **lowerCamelCase_ , )
# make sure list is in array format
_lowercase : Dict = padded_inputs.get('input_features' )
if isinstance(input_features[0] , lowerCamelCase_ ):
_lowercase : List[str] = [np.asarray(lowerCamelCase_ , dtype=np.floataa ) for feature in input_features]
_lowercase : List[Any] = padded_inputs.get('attention_mask' )
if attention_mask is not None:
_lowercase : Union[str, Any] = [np.asarray(lowerCamelCase_ , dtype=np.intaa ) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
_lowercase : int = (
np.array(lowerCamelCase_ , dtype=np.intaa )
if self._get_padding_strategies(lowerCamelCase_ , max_length=lowerCamelCase_ ) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
_lowercase : List[Any] = self.normalize(
padded_inputs['input_features'] , attention_mask=lowerCamelCase_ )
if return_tensors is not None:
_lowercase : Union[str, Any] = padded_inputs.convert_to_tensors(lowerCamelCase_ )
return padded_inputs
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """simple docstring"""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine", ) -> torch.Tensor:
    '''simple docstring'''
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''')

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """simple docstring"""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas=None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        """simple docstring"""
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''')

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """simple docstring"""
        return sample

    def set_timesteps(self, num_inference_steps: int, device=None):
        """simple docstring"""
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
                f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
                f''' maximal {self.config.num_train_timesteps} timesteps.''')

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        """simple docstring"""
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
                " `v_prediction`")

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range)

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        """simple docstring"""
        return self.config.num_train_timesteps
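# Minimal usage sketch (`unet` and `latents` stand in for whatever noise model and
# sample the caller provides; this is not the only way to drive the scheduler):
#
#   scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       noise_pred = unet(latents, t).sample
#       latents = scheduler.step(noise_pred, t, latents).prev_sample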
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 1_0


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    '''simple docstring'''
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    '''simple docstring'''
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    '''simple docstring'''
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("""Enter numbers separated by comma:\n""").strip()
    collection = [int(item.strip()) for item in user_input.split(""",""")]
    assert collection == sorted(collection), F"List must be ordered.\n{collection}."
    target = int(input("""Enter the number to be found in the list:\n""").strip())
    resulta = ite_ternary_search(collection, target)
    resultb = rec_ternary_search(0, len(collection) - 1, collection, target)
    if resulta != -1:
        print(F'Iterative search: {target} found at positions: {resulta}')
        print(F'Recursive search: {target} found at positions: {resultb}')
    else:
        print("""Not found""")
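# Illustrative call on a sorted list:
# ite_ternary_search([1, 3, 5, 7, 9], 7) and
# rec_ternary_search(0, 4, [1, 3, 5, 7, 9], 7) both return index 3.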
"""simple docstring"""
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
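# Illustrative call: kinetic_energy(10, 10) returns 500.0 (joules for SI inputs);
# the speed is taken as abs(velocity), so kinetic_energy(10, -10) is identical.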
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class FeatureExtractionSavingTestMixin:
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
def sum_of_divisors(input_num: int) -> int:
    """simple docstring"""
    if not isinstance(input_num, int):
        raise ValueError('''Input must be an integer''')
    if input_num <= 0:
        raise ValueError('''Input must be positive''')
    return sum(
        divisor for divisor in range(1, input_num // 2 + 1) if input_num % divisor == 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
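# Illustrative calls: sum_of_divisors(6) returns 6 (1 + 2 + 3) and
# sum_of_divisors(28) returns 28, i.e. both are perfect numbers.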
'''simple docstring'''
def set_bit(number: int, position: int) -> int:
    """simple docstring"""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """simple docstring"""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """simple docstring"""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """simple docstring"""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """simple docstring"""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
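# Illustrative calls: set_bit(0b1101, 1) returns 15 (0b1111),
# clear_bit(0b1111, 2) returns 11 (0b1011), and is_bit_set(0b1010, 1) returns True.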
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """simple docstring"""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    """simple docstring"""

    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """simple docstring"""
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, 'rb') as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """simple docstring"""
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """simple docstring"""
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""")
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, 'rb') as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"""{file_idx}_{batch_idx}""", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"""Failed to read file '{file}' with error {type(e)}: {e}""")
                    raise
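# This builder backs the library's parquet loading path; a typical entry point is
# the top-level API (the file pattern below is illustrative):
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files={"train": "data/*.parquet"})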
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : str = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_lowerCamelCase : List[str] = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
_lowerCamelCase : Tuple = {
"google/bigbird-roberta-base": 4096,
"google/bigbird-roberta-large": 4096,
"google/bigbird-base-trivia-itc": 4096,
}
_lowerCamelCase : int = "▁"
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = BigBirdTokenizer
_SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""]
_SCREAMING_SNAKE_CASE = []
def __init__( self : List[str] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[Any]="<unk>" , UpperCamelCase__ : Optional[int]="<s>" , UpperCamelCase__ : List[Any]="</s>" , UpperCamelCase__ : Any="<pad>" , UpperCamelCase__ : List[Any]="[SEP]" , UpperCamelCase__ : List[str]="[MASK]" , UpperCamelCase__ : Union[str, Any]="[CLS]" , **UpperCamelCase__ : Any , ):
"""simple docstring"""
UpperCamelCase = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token
UpperCamelCase = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
UpperCamelCase = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token
UpperCamelCase = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
UpperCamelCase = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token
UpperCamelCase = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , **UpperCamelCase__ , )
UpperCamelCase = vocab_file
UpperCamelCase = False if not self.vocab_file else True
def A ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1]
def A ( self : Tuple , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase = os.path.join(
UpperCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
return (out_vocab_file,)
from math import factorial
def solution(num: int = 100) -> int:
    """simple docstring"""
    return sum(map(int, str(factorial(num))))
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
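# Illustrative call: solution(10) returns 27, the digit sum of 10! = 3628800.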
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    """configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_pegasus_x"""] = [
        """PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """PegasusXForConditionalGeneration""",
        """PegasusXModel""",
        """PegasusXPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
TOP_LEVEL_KEYS = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
for attribute in key.split('.' ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
__lowerCamelCase : int = '''lm_head'''
__lowerCamelCase : List[str] = getattr(lowerCamelCase__ , lowerCamelCase__ )
if weight_type is not None:
__lowerCamelCase : Tuple = getattr(lowerCamelCase__ , lowerCamelCase__ ).shape
else:
__lowerCamelCase : Any = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
__lowerCamelCase : Optional[Any] = value
elif weight_type == "weight_g":
__lowerCamelCase : Tuple = value
elif weight_type == "weight_v":
__lowerCamelCase : Optional[Any] = value
elif weight_type == "bias":
__lowerCamelCase : str = value
else:
__lowerCamelCase : Any = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
__lowerCamelCase : Dict = []
__lowerCamelCase : Optional[int] = fairseq_model.state_dict()
__lowerCamelCase : int = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
__lowerCamelCase : Optional[int] = False
if "conv_layers" in name:
load_conv_layer(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , hf_model.config.feat_extract_norm == 'group' , )
__lowerCamelCase : str = True
else:
for key, mapped_key in MAPPING.items():
__lowerCamelCase : Any = '''unispeech.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowerCamelCase : Dict = True
if "*" in mapped_key:
__lowerCamelCase : Tuple = name.split(lowerCamelCase__ )[0].split('.' )[-2]
__lowerCamelCase : int = mapped_key.replace('*' , lowerCamelCase__ )
if "weight_g" in name:
__lowerCamelCase : Optional[Any] = '''weight_g'''
elif "weight_v" in name:
__lowerCamelCase : List[str] = '''weight_v'''
elif "bias" in name:
__lowerCamelCase : Union[str, Any] = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCamelCase : Optional[Any] = '''weight'''
else:
__lowerCamelCase : Optional[int] = None
set_recursively(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
continue
if not is_used:
unused_weights.append(lowerCamelCase__ )
logger.warning(F"Unused weights: {unused_weights}" )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
__lowerCamelCase : Optional[int] = full_name.split('conv_layers.' )[-1]
__lowerCamelCase : Optional[int] = name.split('.' )
__lowerCamelCase : List[Any] = int(items[0] )
__lowerCamelCase : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
__lowerCamelCase : List[str] = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
__lowerCamelCase : Tuple = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
__lowerCamelCase : Optional[int] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
__lowerCamelCase : Optional[int] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(lowerCamelCase__ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__=True ) -> Optional[Any]:
if config_path is not None:
__lowerCamelCase : List[str] = UniSpeechConfig.from_pretrained(lowerCamelCase__ )
else:
__lowerCamelCase : Optional[int] = UniSpeechConfig()
if is_finetuned:
if dict_path:
__lowerCamelCase : str = Dictionary.load_from_json(lowerCamelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__lowerCamelCase : Optional[int] = target_dict.pad_index
__lowerCamelCase : str = target_dict.bos_index
__lowerCamelCase : List[Any] = target_dict.eos_index
__lowerCamelCase : int = len(target_dict.symbols )
__lowerCamelCase : str = os.path.join(lowerCamelCase__ , 'vocab.json' )
if not os.path.isdir(lowerCamelCase__ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(lowerCamelCase__ ) )
return
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
__lowerCamelCase : List[Any] = target_dict.indices
# fairseq has the <pad> and <s> switched
__lowerCamelCase : str = 4_2
__lowerCamelCase : List[str] = 4_3
with open(lowerCamelCase__ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(lowerCamelCase__ , lowerCamelCase__ )
__lowerCamelCase : Dict = WavaVecaPhonemeCTCTokenizer(
lowerCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=lowerCamelCase__ , )
__lowerCamelCase : int = True if config.feat_extract_norm == '''layer''' else False
__lowerCamelCase : Union[str, Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
__lowerCamelCase : int = WavaVecaProcessor(feature_extractor=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
processor.save_pretrained(lowerCamelCase__ )
__lowerCamelCase : Dict = UniSpeechForCTC(lowerCamelCase__ )
else:
__lowerCamelCase : Dict = UniSpeechForPreTraining(lowerCamelCase__ )
if is_finetuned:
__lowerCamelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path} )
else:
__lowerCamelCase : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
__lowerCamelCase : Optional[int] = model[0].eval()
recursively_load_weights(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
hf_unispeech.save_pretrained(lowerCamelCase__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    """configuration_maskformer""": ["""MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MaskFormerConfig"""],
    """configuration_maskformer_swin""": ["""MaskFormerSwinConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =["""MaskFormerFeatureExtractor"""]
a =["""MaskFormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_maskformer"""] = [
        """MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """MaskFormerForInstanceSegmentation""",
        """MaskFormerModel""",
        """MaskFormerPreTrainedModel""",
    ]
    _import_structure["""modeling_maskformer_swin"""] = [
        """MaskFormerSwinBackbone""",
        """MaskFormerSwinModel""",
        """MaskFormerSwinPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r'^(?P<major>\d+)' r'\.(?P<minor>\d+)' r'\.(?P<patch>\d+)$')


@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        '''simple docstring'''
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        '''simple docstring'''
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        '''simple docstring'''
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        '''simple docstring'''
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        '''simple docstring'''
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        '''simple docstring'''
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        '''simple docstring'''
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        '''simple docstring'''
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self):
        '''simple docstring'''
        return self.version_str


def _str_to_version_tuple(version_str):
    '''simple docstring'''
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("""major"""), res.group("""minor"""), res.group("""patch""")])


def _version_tuple_to_str(version_tuple):
    '''simple docstring'''
    return ".".join(str(v) for v in version_tuple)
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
_lowerCAmelCase = None
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
_lowerCAmelCase = {
'vocab_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/spiece.model',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/spiece.model',
},
'tokenizer_file': {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json',
},
}
_lowerCAmelCase = {
'google/fnet-base': 5_12,
'google/fnet-large': 5_12,
}
_lowerCAmelCase = '▁'
class UpperCamelCase (__snake_case ):
_SCREAMING_SNAKE_CASE : Dict = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : Optional[Any] = ["""input_ids""", """token_type_ids"""]
_SCREAMING_SNAKE_CASE : Union[str, Any] = FNetTokenizer
def __init__( self :Optional[Any] , __magic_name__ :Union[str, Any]=None , __magic_name__ :Any=None , __magic_name__ :List[str]=False , __magic_name__ :Any=True , __magic_name__ :Any=True , __magic_name__ :Union[str, Any]="<unk>" , __magic_name__ :List[Any]="[SEP]" , __magic_name__ :Union[str, Any]="<pad>" , __magic_name__ :Tuple="[CLS]" , __magic_name__ :Optional[int]="[MASK]" , **__magic_name__ :Optional[int] , ) ->Tuple:
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
lowercase : Union[str, Any] = (
AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ , normalized=__magic_name__ )
if isinstance(__magic_name__ , __magic_name__ )
else mask_token
)
super().__init__(
__magic_name__ , tokenizer_file=__magic_name__ , do_lower_case=__magic_name__ , remove_space=__magic_name__ , keep_accents=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , **__magic_name__ , )
lowercase : Tuple = do_lower_case
lowercase : Tuple = remove_space
lowercase : List[str] = keep_accents
lowercase : Union[str, Any] = vocab_file
lowercase : int = False if not self.vocab_file else True
def __snake_case ( self :Any , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ) ->List[int]:
lowercase : List[str] = [self.sep_token_id]
lowercase : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __snake_case ( self :int , __magic_name__ :List[int] , __magic_name__ :Optional[List[int]] = None ) ->List[int]:
lowercase : Tuple = [self.sep_token_id]
lowercase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __snake_case ( self :int , __magic_name__ :str , __magic_name__ :Optional[str] = None ) ->Tuple[str]:
if not os.path.isdir(__magic_name__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase : str = os.path.join(
__magic_name__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ):
copyfile(self.vocab_file , __magic_name__ )
return (out_vocab_file,)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__: Any = logging.get_logger(__name__)
a__: List[str] = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = '''trocr'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_attention_heads''': '''decoder_attention_heads''',
        '''hidden_size''': '''d_model''',
        '''num_hidden_layers''': '''decoder_layers''',
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
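# Illustrative use of the attribute_map above: TrOCRConfig().hidden_size reads the
# same value as TrOCRConfig().d_model (1024 by default) via PretrainedConfig's
# attribute aliasing.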
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
a__: Any = (
'https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'
)
a__: List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
def UpperCamelCase__( )->Dict:
A__ = '''https://pypi.org/pypi/diffusers/json'''
A__ = json.loads(request.urlopen(UpperCamelCase__ ).read() )['''releases'''].keys()
return sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : version.Version(UpperCamelCase__ ) )
def UpperCamelCase__( )->List[Any]:
# This function has already been executed if HF_MODULES_CACHE already is in the Python path.
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(UpperCamelCase__ )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
A__ = Path(UpperCamelCase__ ) / '''__init__.py'''
if not init_path.exists():
init_path.touch()
def create_dynamic_module(name: Union[str, os.PathLike]):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports
def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class
def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    # Download and cache `module_file` from the repo, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)
    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None
        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative imports
        for module_needed in modules_needed:
            if not (submodule_path / f"{module_needed}.py").exists():
                get_cached_module_file(
                    pretrained_model_name_or_path, f"{module_needed}.py", cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)
def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    final_module = get_cached_module_file(
        pretrained_model_name_or_path, module_file, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
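

# Hedged usage sketch for the loader above: resolve a community pipeline class
# by name. "clip_guided_stable_diffusion" is an illustrative community pipeline;
# the call needs network access to fetch the file from the diffusers repo.
# pipeline_cls = get_class_from_dynamic_module(
#     "clip_guided_stable_diffusion", module_file="clip_guided_stable_diffusion.py"
# )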
| 212
| 1
|
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
lowercase_ = "hf-internal-testing/tiny-random-bert"
lowercase_ = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
lowercase_ = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class __A ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ) -> List[Any]:
"""simple docstring"""
_a = cached_file(A , A )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(A ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(A , A ) ) )
with open(os.path.join(A , '''refs''' , '''main''' ) ) as f:
_a = f.read()
self.assertEqual(A , os.path.join(A , '''snapshots''' , A , A ) )
self.assertTrue(os.path.isfile(A ) )
# File is cached at the same place the second time.
_a = cached_file(A , A )
self.assertEqual(A , A )
# Using a specific revision to test the full commit hash.
_a = cached_file(A , A , revision='''9b8c223''' )
self.assertEqual(A , os.path.join(A , '''snapshots''' , A , A ) )
def a__ (self ) -> Dict:
"""simple docstring"""
with self.assertRaisesRegex(A , '''is not a valid model identifier''' ):
_a = cached_file('''tiny-random-bert''' , A )
with self.assertRaisesRegex(A , '''is not a valid git identifier''' ):
_a = cached_file(A , A , revision='''aaaa''' )
with self.assertRaisesRegex(A , '''does not appear to have a file named''' ):
_a = cached_file(A , '''conf''' )
def a__ (self ) -> int:
"""simple docstring"""
with self.assertRaisesRegex(A , '''does not appear to have a file named''' ):
_a = cached_file(A , '''conf''' )
with open(os.path.join(A , '''refs''' , '''main''' ) ) as f:
_a = f.read()
self.assertTrue(os.path.isfile(os.path.join(A , '''.no_exist''' , A , '''conf''' ) ) )
_a = cached_file(A , '''conf''' , _raise_exceptions_for_missing_entries=A )
self.assertIsNone(A )
_a = cached_file(A , '''conf''' , local_files_only=A , _raise_exceptions_for_missing_entries=A )
self.assertIsNone(A )
_a = mock.Mock()
_a = 500
_a = {}
_a = HTTPError
_a = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('''requests.Session.request''' , return_value=A ) as mock_head:
_a = cached_file(A , '''conf''' , _raise_exceptions_for_connection_errors=A )
self.assertIsNone(A )
# This check we did call the fake head request
mock_head.assert_called()
def a__ (self ) -> int:
"""simple docstring"""
self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , A ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , A ) )
self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , A ) )
def a__ (self ) -> str:
"""simple docstring"""
self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(A , '''is not a valid model identifier''' ):
get_file_from_repo('''bert-base-case''' , A )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(A , '''is not a valid git identifier''' ):
get_file_from_repo('''bert-base-cased''' , A , revision='''ahaha''' )
_a = get_file_from_repo('''bert-base-cased''' , A )
# The name is the cached name which is not very easy to test, so instead we load the content.
_a = json.loads(open(A , '''r''' ).read() )
self.assertEqual(config['''hidden_size'''] , 768 )
def a__ (self ) -> List[str]:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_a = Path(A ) / '''a.txt'''
filename.touch()
self.assertEqual(get_file_from_repo(A , '''a.txt''' ) , str(A ) )
self.assertIsNone(get_file_from_repo(A , '''b.txt''' ) )
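

# Quick illustration of the caching behaviour the first test checks (a sketch,
# assuming network access on the first call): repeated calls resolve to the
# same cached snapshot path.
if __name__ == "__main__":
    first = cached_file(RANDOM_BERT, CONFIG_NAME)
    second = cached_file(RANDOM_BERT, CONFIG_NAME)
    assert first == second  # served from the local cache the second time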
| 11
|
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
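
# A quick sanity check (a sketch under the same simulator assumptions): for the
# all-zero input state, the QFT yields a uniform superposition, so each of the
# 2**n basis states should receive roughly 10000 / 2**n of the shots.
if __name__ == "__main__":
    counts = quantum_fourier_transform(3)
    assert sum(counts.values()) == 10000
    assert len(counts) == 2**3  # with 10000 shots, all 8 outcomes virtually always appear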
| 608
| 0
|
"""simple docstring"""
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """
    Apply Ohm's law (V = I * R) on two given electrical values and return the
    missing one (passed as 0) as a name/value pair.

    >>> ohms_law(voltage=10, resistance=5, current=0)
    {'current': 2.0}
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
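
# Two worked examples of solving V = I * R with the function above: pass the
# unknown quantity as 0 and read it back from the returned dict.
if __name__ == "__main__":
    print(ohms_law(voltage=0, current=2, resistance=3))   # {'voltage': 6.0}
    print(ohms_law(voltage=10, current=0, resistance=5))  # {'current': 2.0}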
| 529
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine."


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
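
# Illustrative non-interactive invocation of the parser above (a sketch;
# "my_config.yaml" is a hypothetical path, and config_command still prompts
# interactively for the configuration values):
# parser = config_command_parser()
# args = parser.parse_args(["--config_file", "my_config.yaml"])
# config_command(args)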
| 529
| 1
|
import argparse
import tensorflow as tf
import torch
from transformers import BertConfig, BertForMaskedLM
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertPooler,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
logging.set_verbosity_info()
def convert_checkpoint_to_pytorch(tf_checkpoint_path: str, config_path: str, pytorch_dump_path: str):
    def get_masked_lm_array(name: str):
        full_name = f"masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_array(name: str):
        full_name = f"encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_layer_array(layer_index: int, name: str):
        full_name = f"encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    def get_encoder_attention_layer_array(layer_index: int, name: str, original_shape):
        full_name = f"encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE"
        array = tf.train.load_variable(tf_checkpoint_path, full_name)
        array = array.reshape(original_shape)
        if "kernel" in name:
            array = array.transpose()
        return torch.from_numpy(array)

    print(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertForMaskedLM(config)

    # Layers
    for layer_index in range(0, config.num_hidden_layers):
        layer: BertLayer = model.bert.encoder.layer[layer_index]

        # Self-attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.query.weight.data = get_encoder_attention_layer_array(layer_index, "_query_dense/kernel", self_attn.query.weight.data.shape)
        self_attn.query.bias.data = get_encoder_attention_layer_array(layer_index, "_query_dense/bias", self_attn.query.bias.data.shape)
        self_attn.key.weight.data = get_encoder_attention_layer_array(layer_index, "_key_dense/kernel", self_attn.key.weight.data.shape)
        self_attn.key.bias.data = get_encoder_attention_layer_array(layer_index, "_key_dense/bias", self_attn.key.bias.data.shape)
        self_attn.value.weight.data = get_encoder_attention_layer_array(layer_index, "_value_dense/kernel", self_attn.value.weight.data.shape)
        self_attn.value.bias.data = get_encoder_attention_layer_array(layer_index, "_value_dense/bias", self_attn.value.bias.data.shape)

        # Self-attention Output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.weight.data = get_encoder_attention_layer_array(layer_index, "_output_dense/kernel", self_output.dense.weight.data.shape)
        self_output.dense.bias.data = get_encoder_attention_layer_array(layer_index, "_output_dense/bias", self_output.dense.bias.data.shape)
        self_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/gamma")
        self_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_attention_layer_norm/beta")

        # Intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.weight.data = get_encoder_layer_array(layer_index, "_intermediate_dense/kernel")
        intermediate.dense.bias.data = get_encoder_layer_array(layer_index, "_intermediate_dense/bias")

        # Output
        bert_output: BertOutput = layer.output
        bert_output.dense.weight.data = get_encoder_layer_array(layer_index, "_output_dense/kernel")
        bert_output.dense.bias.data = get_encoder_layer_array(layer_index, "_output_dense/bias")
        bert_output.LayerNorm.weight.data = get_encoder_layer_array(layer_index, "_output_layer_norm/gamma")
        bert_output.LayerNorm.bias.data = get_encoder_layer_array(layer_index, "_output_layer_norm/beta")

    # Embeddings
    model.bert.embeddings.position_embeddings.weight.data = get_encoder_array("_position_embedding_layer/embeddings")
    model.bert.embeddings.token_type_embeddings.weight.data = get_encoder_array("_type_embedding_layer/embeddings")
    model.bert.embeddings.LayerNorm.weight.data = get_encoder_array("_embedding_norm_layer/gamma")
    model.bert.embeddings.LayerNorm.bias.data = get_encoder_array("_embedding_norm_layer/beta")

    # LM Head
    lm_head = model.cls.predictions.transform
    lm_head.dense.weight.data = get_masked_lm_array("dense/kernel")
    lm_head.dense.bias.data = get_masked_lm_array("dense/bias")
    lm_head.LayerNorm.weight.data = get_masked_lm_array("layer_norm/gamma")
    lm_head.LayerNorm.bias.data = get_masked_lm_array("layer_norm/beta")
    model.bert.embeddings.word_embeddings.weight.data = get_masked_lm_array("embedding_table")

    # Pooling
    model.bert.pooler = BertPooler(config=config)
    model.bert.pooler.dense.weight.data = get_encoder_array("_pooler_layer/kernel")
    model.bert.pooler.dense.bias.data = get_encoder_array("_pooler_layer/bias")

    # Export final model
    model.save_pretrained(pytorch_dump_path)

    # Integration test - should load without any errors ;)
    new_model = BertForMaskedLM.from_pretrained(pytorch_dump_path)
    print(new_model.eval())

    print("Model conversion was done successfully!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model.",
)
    args = parser.parse_args()
convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
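
# Example invocation (a sketch; all paths are illustrative):
#   python convert_token_dropping_checkpoint.py \
#       --tf_checkpoint_path ./tf_ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model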
| 10
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEPipeline
    params = ["prompt"]
    batch_params = ["prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)

        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }

        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)

        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (0.1, 0.1, 0.1),
        }

        model = ShapERenderer(**model_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        renderer = self.dummy_renderer

        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference,
        )

    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt


@slow
@require_torch_gpu
class ShapEPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_np_out.npy"
        )
        pipe = ShapEPipeline.from_pretrained("openai/shap-e")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            "a shark", generator=generator, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
| 10
| 1
|
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
        # fmt: off
lowercase_ : List[str] = {"""input_ids""": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase_, model_name="AI-Sweden/gpt-sw3-126m", sequences=sequences,
        )
| 714
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
lowercase_ : Optional[Any] = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(lowercase_)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 7
| 0
|
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 121
|
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    def __init__(self):
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O", "B-ADVP", "B-INTJ", "B-LST", "B-PRT", "B-NP", "B-SBAR", "B-VP", "B-ADJP", "B-CONJP", "B-PP",
                "I-ADVP", "I-INTJ", "I-LST", "I-PRT", "I-NP", "I-SBAR", "I-VP", "I-ADJP", "I-CONJP", "I-PP",
            ]


class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0)}) '
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM",
                "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X",
            ]
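

# Hedged usage sketch: write a tiny CoNLL-style file and read it back with the
# NER task above (the file contents are illustrative).
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as data_dir:
        with open(os.path.join(data_dir, "train.txt"), "w", encoding="utf-8") as f:
            f.write("EU B-ORG\nrejects O\nGerman B-MISC\ncall O\n\n")
        examples = NER().read_examples_from_file(data_dir, "train")
        print(examples[0].words)   # ['EU', 'rejects', 'German', 'call']
        print(examples[0].labels)  # ['B-ORG', 'O', 'B-MISC', 'O']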
| 121
| 1
|
def get_demo_graph(index: int) -> dict[int, list[int]]:
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph: dict[int, list[int]]) -> list[tuple[int, int]]:
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
if __name__ == "__main__":
import doctest
doctest.testmod()
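
# Example: the first demo graph has a triangle {0, 1, 2}, a tail 2-3-4 and a
# 4-cycle 5-6-7-8, so its bridges are exactly (2, 3), (3, 4) and (2, 5).
if __name__ == "__main__":
    print(sorted(compute_bridges(get_demo_graph(0))))  # [(2, 3), (2, 5), (3, 4)]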
| 447
|
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
def __snake_case ( self : Union[str, Any] )->Optional[Any]:
super().setUp()
__SCREAMING_SNAKE_CASE : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
__SCREAMING_SNAKE_CASE : List[str] = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
__SCREAMING_SNAKE_CASE : int = Path(self.tmpdirname )
save_json(UpperCamelCase , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)
def __snake_case ( self : Dict , UpperCamelCase : List[str] )->int:
return (
"This is a test",
"This is a test",
)
def __snake_case ( self : str )->Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = "</s>"
__SCREAMING_SNAKE_CASE : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )
def __snake_case ( self : Tuple )->Optional[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Any = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(UpperCamelCase ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def __snake_case ( self : Dict )->Dict:
pass
def __snake_case ( self : Union[str, Any] )->Union[str, Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [2, 3, 4, 5, 6] , )
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
__SCREAMING_SNAKE_CASE : Any = tokenizer.convert_tokens_to_string(UpperCamelCase )
self.assertEqual(UpperCamelCase , "This is a test" )
@slow
def __snake_case ( self : Any )->Union[str, Any]:
# fmt: off
__SCREAMING_SNAKE_CASE : Tuple = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__SCREAMING_SNAKE_CASE, model_name="facebook/m2m100_418M", revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE (unittest.TestCase ):
lowerCAmelCase = """facebook/m2m100_418M"""
lowerCAmelCase = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
lowerCAmelCase = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
lowerCAmelCase = [EN_CODE, 593, 1949, 11_5781, 4, 7_1586, 4234, 6_0633, 12_6233, 432, 12_3808, 1_5592, 1197, 11_7132, 12_0618, 5, 2]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
def __snake_case ( self : Dict )->Any:
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 1_2_8_0_0_6 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 1_2_8_0_2_2 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 1_2_8_0_7_6 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 1_2_8_0_6_3 )
def __snake_case ( self : Any )->Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = self.tokenizer.get_vocab()
self.assertEqual(len(UpperCamelCase ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , UpperCamelCase )
def __snake_case ( self : List[Any] )->str:
__SCREAMING_SNAKE_CASE : Union[str, Any] = "en"
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase )
def __snake_case ( self : Union[str, Any] )->List[Any]:
self.assertIn(UpperCamelCase , self.tokenizer.all_special_ids )
# fmt: off
__SCREAMING_SNAKE_CASE : Dict = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
# fmt: on
__SCREAMING_SNAKE_CASE : int = self.tokenizer.decode(UpperCamelCase , skip_special_tokens=UpperCamelCase )
__SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase )
    def test_tokenizer_save_pretrained(self):
        tmpdirname = tempfile.mkdtemp()
        original_lang_token_to_id = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_lang_token_to_id)
@require_torch
def __snake_case ( self : Any )->int:
__SCREAMING_SNAKE_CASE : List[str] = "en"
__SCREAMING_SNAKE_CASE : Dict = "fr"
__SCREAMING_SNAKE_CASE : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase , return_tensors="pt" )
__SCREAMING_SNAKE_CASE : Optional[int] = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
__SCREAMING_SNAKE_CASE : Union[str, Any] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __snake_case ( self : str )->int:
__SCREAMING_SNAKE_CASE : Optional[Any] = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
__SCREAMING_SNAKE_CASE : Optional[int] = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __snake_case ( self : Optional[Any] )->List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
__SCREAMING_SNAKE_CASE : Optional[int] = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __snake_case ( self : Tuple )->Optional[int]:
__SCREAMING_SNAKE_CASE : Any = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(UpperCamelCase ) , {
# en_XX, A, test, EOS
"input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 1_2_8_0_0_6,
} , )
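
# Illustrative sketch (added for clarity; not part of the original test file):
# `shift_tokens_right` builds decoder inputs by putting the decoder start token
# (here EOS, id 2) in front and shifting the labels right by one, which is why
# the parity test above expects `decoder_input_ids[1][:2] == [2, FR_CODE]`.
# The helper name below is hypothetical; the logic mirrors the common
# seq2seq shift, assuming `labels` is a 2D LongTensor that may contain -100.
def shift_tokens_right_sketch(labels, pad_token_id, decoder_start_token_id):
    import torch  # local import so this sketch stays self-contained

    shifted = labels.new_zeros(labels.shape)
    shifted[:, 1:] = labels[:, :-1].clone()  # shift everything one step right
    shifted[:, 0] = decoder_start_token_id   # decoder starts from EOS here
    # replace any -100 loss-masking placeholders with the real pad token
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted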
'''simple docstring'''
import unittest
import numpy as np
def schur_complement(
    mat_a,
    mat_b,
    mat_c,
    pseudo_inv=None,
):
    """
    Computes the Schur complement S = C - B.T @ A^{-1} @ B of the block matrix
    M = [[A, B], [B.T, C]] with respect to the (invertible) block A.
    """
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b


class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        # det(M) = det(A) * det(S) for the Schur complement S of A in M
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        # swapping the blocks makes A and B disagree on the number of rows
        with self.assertRaises(ValueError):
            schur_complement(a, c, b)

    def test_improper_b_c_dimensions(self) -> None:
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        # B and C disagree on the number of columns
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
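
# Hedged demo (added; not in the original file): the Schur complement satisfies
# det(M) = det(A) * det(S), the identity the first test above checks. A small
# standalone function (hypothetical helper name) illustrating the same fact:
def schur_complement_demo() -> None:
    a = np.array([[4.0, 1.0], [1.0, 3.0]])  # invertible 2x2 block
    b = np.array([[1.0], [2.0]])            # 2x1 off-diagonal block
    c = np.array([[5.0]])                   # 1x1 lower-right block
    s = schur_complement(a, b, c)
    m = np.block([[a, b], [b.T, c]])
    # det(M) and det(A) * det(S) should agree up to floating-point error.
    print(np.linalg.det(m), np.linalg.det(a) * np.linalg.det(s))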
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256


class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: T5FilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to the network's output range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs back to the features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
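
# Hedged demo (added; not part of the original pipeline): `scale_features` and
# `scale_to_features` are inverse affine maps between the log-mel range
# [log(1e-5), 4.0] and the network range [-1, 1]. A quick standalone check of
# that round trip, using only module-level imports:
def scale_round_trip_demo() -> None:
    min_value, max_value = math.log(1e-5), 4.0
    features = torch.linspace(min_value, max_value, steps=5)
    zero_one = (features - min_value) / (max_value - min_value)
    scaled = zero_one * 2.0 - 1.0  # scale_features(..., output_range=[-1, 1])
    restored = (scaled + 1.0) / 2.0 * (max_value - min_value) + min_value
    assert torch.allclose(features, restored, atol=1e-6)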
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    r"""
    Text-to-image pipeline that reuses a fixed-size reference noise tensor so that
    the same seed produces similar images across different output resolutions.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
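
# Hedged demo (added; not part of the pipeline): the dx/dy arithmetic above
# center-pastes a fixed 64x64 reference noise tensor into a target latent grid
# of arbitrary size, so the same seed yields similar layouts across sizes.
# A standalone sketch of that indexing for a larger-than-reference target:
def center_paste_demo() -> None:
    reference = torch.randn(1, 4, 64, 64)
    target = torch.randn(1, 4, 96, 96)  # e.g. a 768x768 image -> 96x96 latents
    dx = (target.shape[3] - reference.shape[3]) // 2  # 16
    dy = (target.shape[2] - reference.shape[2]) // 2  # 16
    # paste the whole reference into the center of the target grid
    target[:, :, dy : dy + 64, dx : dx + 64] = reference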
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class ActivationsTests(unittest.TestCase):
    def test_swish(self):
        act = get_activation("swish")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_silu(self):
        act = get_activation("silu")

        self.assertIsInstance(act, nn.SiLU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_mish(self):
        act = get_activation("mish")

        self.assertIsInstance(act, nn.Mish)

        self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)

    def test_gelu(self):
        act = get_activation("gelu")

        self.assertIsInstance(act, nn.GELU)

        self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0)
        self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0)
        self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
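
# Hedged demo (added; not in the original test): "swish" and "silu" both map to
# nn.SiLU, which computes x * sigmoid(x); that is why silu(-100) underflows to 0
# and silu(20) rounds to exactly 20 in float32. A standalone check:
def silu_identity_demo() -> None:
    act = get_activation("silu")
    x = torch.linspace(-5.0, 5.0, steps=11)
    assert torch.allclose(act(x), x * torch.sigmoid(x))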
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/unispeech-sat-base-100h-libri-ft""": (
"""https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json"""
),
# See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat
}
class UniSpeechSatConfig(PretrainedConfig):
    model_type = "unispeech-sat"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        num_clusters=504,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.num_clusters = num_clusters
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
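
# Hedged demo (added; not in the original config file): the property above
# multiplies the convolutional strides, giving the number of raw audio samples
# consumed per output frame (5 * 2**6 = 320 for the default feature extractor).
def stride_product_demo() -> None:
    conv_stride = (5, 2, 2, 2, 2, 2, 2)
    assert functools.reduce(operator.mul, conv_stride, 1) == 320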
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    r"""
    Pipeline that runs one prompt through Stable Diffusion checkpoints v1.1-v1.4
    in parallel so the results can be compared side by side.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )

        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)

    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def text2img_sd1_1(
        self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50,
        guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs,
    ):
        return self.pipe1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_2(
        self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50,
        guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs,
    ):
        return self.pipe2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_3(
        self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50,
        guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs,
    ):
        return self.pipe3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def text2img_sd1_4(
        self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50,
        guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs,
    ):
        return self.pipe4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale, negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
            output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps,
            **kwargs,
        )

    @torch.no_grad()
    def __call__(
        self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50,
        guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs,
    ):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)

        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")

        # Get the first result from each Stable Diffusion checkpoint v1.1 - v1.4
        results = []
        for text2img in (self.text2img_sd1_1, self.text2img_sd1_2, self.text2img_sd1_3, self.text2img_sd1_4):
            results.append(
                text2img(
                    prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale, negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents,
                    output_type=output_type, return_dict=return_dict, callback=callback,
                    callback_steps=callback_steps, **kwargs,
                )
            )

        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res[0] for res in results])
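
# Hedged usage sketch (added; not in the original file). Community pipelines like
# this one are typically loaded through `DiffusionPipeline.from_pretrained` with
# the `custom_pipeline` argument; the exact community id below is an assumption.
def comparison_usage_sketch() -> None:
    pipe = DiffusionPipeline.from_pretrained(
        "CompVis/stable-diffusion-v1-4",
        custom_pipeline="stable_diffusion_comparison",  # assumed community id
    )
    output = pipe(prompt="an astronaut riding a horse")
    # `output.images` then holds one image per checkpoint v1.1 ... v1.4.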
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none."
            )

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)

            else:
                raise ValueError("Target return tensor type could not be returned")

            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs
            ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
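
# Hedged demo (added; not in the original processor): for nested text queries
# the processor pads every image's query list to the batch-wide maximum with a
# single-space string before tokenizing, so all samples produce the same number
# of query embeddings. A standalone sketch of just that padding step:
def pad_queries_demo() -> None:
    text = [["a cat", "a dog", "a bird"], ["a remote"]]
    max_num_queries = max(len(t) for t in text)
    padded = [t + [" "] * (max_num_queries - len(t)) for t in text]
    assert all(len(t) == max_num_queries for t in padded)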
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        kwargs.update(forward_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residuals (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                kwargs["num_inference_steps"] = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
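
# Hedged demo (added; not in the original test file): PNDM splits its schedule
# into Runge-Kutta warm-up steps (`prk_timesteps`) followed by linear multistep
# steps (`plms_timesteps`), which is exactly what `full_loop` above iterates
# over. A small standalone peek at that split:
def pndm_timesteps_demo() -> None:
    scheduler = PNDMScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    print(scheduler.prk_timesteps)   # duplicated warm-up timesteps
    print(scheduler.plms_timesteps)  # remaining multistep timesteps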
'''simple docstring'''
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
A_ = self.num_labels
A_ = SqueezeBertForTokenClassification(config=_lowercase)
model.to(_lowercase)
model.eval()
A_ = model(_lowercase , attention_mask=_lowercase , labels=_lowercase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def __snake_case ( self : Optional[int] , _lowercase : int , _lowercase : Dict , _lowercase : Dict , _lowercase : List[Any] , _lowercase : str , _lowercase : int) -> Tuple:
A_ = self.num_choices
A_ = SqueezeBertForMultipleChoice(config=_lowercase)
model.to(_lowercase)
model.eval()
A_ = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
A_ = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
A_ = model(
_lowercase , attention_mask=_lowercase , labels=_lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def __snake_case ( self : Optional[Any]) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class __UpperCAmelCase ( __UpperCamelCase ,__UpperCamelCase ,unittest.TestCase ):
'''simple docstring'''
_UpperCamelCase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
_UpperCamelCase = (
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase = False
_UpperCamelCase = True
_UpperCamelCase = False
def __snake_case ( self : Union[str, Any]) -> Dict:
A_ = SqueezeBertModelTester(self)
A_ = ConfigTester(self , config_class=_lowercase , dim=37)
def __snake_case ( self : str) -> str:
self.config_tester.run_common_tests()
def __snake_case ( self : str) -> int:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*_lowercase)
def __snake_case ( self : Union[str, Any]) -> Tuple:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*_lowercase)
def __snake_case ( self : Union[str, Any]) -> Any:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*_lowercase)
def __snake_case ( self : Union[str, Any]) -> List[str]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*_lowercase)
def __snake_case ( self : Optional[int]) -> Optional[Any]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*_lowercase)
def __snake_case ( self : Any) -> Optional[Any]:
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*_lowercase)
@slow
def __snake_case ( self : Optional[int]) -> Dict:
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = SqueezeBertModel.from_pretrained(_lowercase)
self.assertIsNotNone(_lowercase)
@require_sentencepiece
@require_tokenizers
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli')
        input_ids = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_output = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_output, atol=1e-4))
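# Hedged aside: SqueezeBert's q_groups/k_groups/v_groups (and the other
# *_groups knobs configured above) stand for grouped 1x1 convolutions used in
# place of dense projections; with g groups the projection has roughly 1/g of
# the parameters. A minimal illustration, not the model's actual module:
import torch
from torch import nn

hidden, groups = 32, 4
dense = nn.Conv1d(hidden, hidden, kernel_size=1)
grouped = nn.Conv1d(hidden, hidden, kernel_size=1, groups=groups)
x = torch.randn(2, hidden, 7)  # (batch, channels, sequence)
assert dense(x).shape == grouped(x).shape
print(sum(p.numel() for p in dense.parameters()))    # 32*32 + 32 = 1056
print(sum(p.numel() for p in grouped.parameters()))  # 32*(32/4) + 32 = 288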
| 366
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCAmelCase ( unittest.TestCase ):
def A_ ( self : Any ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A_ ( self : Optional[Any] ) -> Optional[int]:
lowerCamelCase__ , lowerCamelCase__ : str = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=UpperCAmelCase , dtype=jnp.bfloataa )
lowerCamelCase__ , lowerCamelCase__ : int = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=UpperCAmelCase , from_pt=UpperCAmelCase , dtype=jnp.bfloataa )
lowerCamelCase__ : int = controlnet_params
lowerCamelCase__ : Any = 'bird'
lowerCamelCase__ : Dict = jax.device_count()
lowerCamelCase__ : Any = pipe.prepare_text_inputs([prompts] * num_samples )
lowerCamelCase__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
lowerCamelCase__ : Tuple = pipe.prepare_image_inputs([canny_image] * num_samples )
lowerCamelCase__ : List[str] = jax.random.PRNGKey(0 )
lowerCamelCase__ : Optional[int] = jax.random.split(UpperCAmelCase , jax.device_count() )
lowerCamelCase__ : List[Any] = replicate(UpperCAmelCase )
lowerCamelCase__ : Dict = shard(UpperCAmelCase )
lowerCamelCase__ : Any = shard(UpperCAmelCase )
lowerCamelCase__ : Any = pipe(
prompt_ids=UpperCAmelCase , image=UpperCAmelCase , params=UpperCAmelCase , prng_seed=UpperCAmelCase , num_inference_steps=50 , jit=UpperCAmelCase , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078])
        print(F"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
def A_ ( self : List[str] ) -> Union[str, Any]:
lowerCamelCase__ , lowerCamelCase__ : Dict = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=UpperCAmelCase , dtype=jnp.bfloataa )
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=UpperCAmelCase , from_pt=UpperCAmelCase , dtype=jnp.bfloataa )
lowerCamelCase__ : Optional[int] = controlnet_params
lowerCamelCase__ : Any = 'Chef in the kitchen'
lowerCamelCase__ : str = jax.device_count()
lowerCamelCase__ : Union[str, Any] = pipe.prepare_text_inputs([prompts] * num_samples )
lowerCamelCase__ : Tuple = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
lowerCamelCase__ : List[str] = pipe.prepare_image_inputs([pose_image] * num_samples )
lowerCamelCase__ : Tuple = jax.random.PRNGKey(0 )
lowerCamelCase__ : List[str] = jax.random.split(UpperCAmelCase , jax.device_count() )
lowerCamelCase__ : Dict = replicate(UpperCAmelCase )
lowerCamelCase__ : List[str] = shard(UpperCAmelCase )
lowerCamelCase__ : Tuple = shard(UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = pipe(
prompt_ids=UpperCAmelCase , image=UpperCAmelCase , params=UpperCAmelCase , prng_seed=UpperCAmelCase , num_inference_steps=50 , jit=UpperCAmelCase , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]])
        print(F"""output_slice: {output_slice}""")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
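# Hedged sketch of the replicate/shard pattern used in both tests above:
# parameters are replicated across local devices while inputs gain a leading
# device axis, so a pmapped function runs once per device. Toy model, not the
# diffusion pipeline itself; requires a working JAX install.
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

num_devices = jax.device_count()
params = {'w': jnp.ones((3, 3))}
inputs = jnp.ones((num_devices * 2, 3))  # batch must split evenly across devices

replicated_params = replicate(params)  # adds a leading axis of size num_devices
sharded_inputs = shard(inputs)         # reshapes to (num_devices, 2, 3)

@jax.pmap
def apply(p, x):
    return x @ p['w']

print(apply(replicated_params, sharded_inputs).shape)  # (num_devices, 2, 3)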
| 295
| 0
|
def is_pentagonal(n: int) -> bool:
    '''simple docstring'''
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    '''simple docstring'''
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
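# Why is_pentagonal works (a sketch): P(k) = k(3k - 1)/2 = n rearranges to
# 3k^2 - k - 2n = 0, so k = (1 + sqrt(1 + 24n)) / 6 by the quadratic formula;
# n is pentagonal exactly when that k is a positive integer, i.e. when
# (1 + root) / 6 has no fractional part.
assert is_pentagonal(22)  # P(4) = 4 * 11 / 2 = 22
assert not is_pentagonal(23)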
if __name__ == "__main__":
print(F"""{solution() = }""")
| 636
|
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
SCREAMING_SNAKE_CASE__ : Any = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class snake_case ( unittest.TestCase ):
def __init__( self : List[Any] , a_ : Optional[int] , a_ : Dict=7 , a_ : Any=3 , a_ : Any=18 , a_ : int=30 , a_ : int=400 , a_ : List[Any]=None , a_ : int=True , a_ : int=True , a_ : Dict=None , )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {'height': 20, 'width': 20}
SCREAMING_SNAKE_CASE__ : str = parent
SCREAMING_SNAKE_CASE__ : Union[str, Any] = batch_size
SCREAMING_SNAKE_CASE__ : Any = num_channels
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_size
SCREAMING_SNAKE_CASE__ : List[str] = min_resolution
SCREAMING_SNAKE_CASE__ : Dict = max_resolution
SCREAMING_SNAKE_CASE__ : List[Any] = size
SCREAMING_SNAKE_CASE__ : Tuple = do_normalize
SCREAMING_SNAKE_CASE__ : Optional[Any] = do_convert_rgb
SCREAMING_SNAKE_CASE__ : List[str] = [512, 1024, 2048, 4096]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = patch_size if patch_size is not None else {'height': 16, 'width': 16}
def __lowercase( self : Optional[Any] )-> str:
"""simple docstring"""
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def __lowercase( self : Dict )-> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
SCREAMING_SNAKE_CASE__ : str = Image.open(requests.get(a_ , stream=a_ ).raw ).convert('RGB' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = PixaStructImageProcessor if is_vision_available() else None
def __lowercase( self : List[str] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = PixaStructImageProcessingTester(self )
@property
def __lowercase( self : Dict )-> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , 'do_normalize' ) )
self.assertTrue(hasattr(a_ , 'do_convert_rgb' ) )
def __lowercase( self : List[Any] )-> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.image_processor_tester.prepare_dummy_image()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
SCREAMING_SNAKE_CASE__ : List[Any] = 2048
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(a_ , return_tensors='pt' , max_patches=a_ )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def __lowercase( self : Any )-> Tuple:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : str = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowercase( self : Any )-> Any:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : str = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
SCREAMING_SNAKE_CASE__ : int = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(a_ ):
SCREAMING_SNAKE_CASE__ : Dict = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
SCREAMING_SNAKE_CASE__ : List[Any] = 'Hello'
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ , header_text=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Any = image_processor(
a_ , return_tensors='pt' , max_patches=a_ , header_text=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowercase( self : List[Any] )-> Dict:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
SCREAMING_SNAKE_CASE__ : str = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : str = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def __lowercase( self : str )-> Optional[Any]:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Any = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : List[Any] = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class snake_case ( UpperCamelCase_ , unittest.TestCase ):
lowercase_ = PixaStructImageProcessor if is_vision_available() else None
def __lowercase( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = PixaStructImageProcessingTester(self , num_channels=4 )
SCREAMING_SNAKE_CASE__ : Dict = 3
@property
def __lowercase( self : Any )-> Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase( self : Dict )-> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , 'do_normalize' ) )
self.assertTrue(hasattr(a_ , 'do_convert_rgb' ) )
def __lowercase( self : str )-> Union[str, Any]:
"""simple docstring"""
# Initialize image_processor
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Dict = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processor(
image_inputs[0] , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Tuple = image_processor(
a_ , return_tensors='pt' , max_patches=a_ ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
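# Hedged aside on the expected_hidden_dim arithmetic repeated in the tests
# above: Pix2Struct flattens every image patch to patch_h * patch_w * channels
# pixel values and prepends the patch's (row, column) position, hence the
# trailing "+ 2".
patch_h, patch_w, channels = 16, 16, 3
expected_hidden_dim = patch_h * patch_w * channels + 2
print(expected_hidden_dim)  # 770 values per flattened patch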
| 636
| 1
|
'''simple docstring'''
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
'''artist''': '''Zac Brown Band''',
'''genres''': '''Country''',
'''lyrics''': '''I met a traveller from an antique land,
Who said "Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
''',
}
@require_torch
    def test_1b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''')
        tokens = tokenizer(**self.metas)['''input_ids''']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 71_69, 5_07, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
torch.tensor([[0, 0, 0, 10_69, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''')
        tokens = tokenizer(**self.metas)['''input_ids''']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 10_69, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 10_69, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 342
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = '''ylacombe/bark-small'''
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = '''en_speaker_1'''
        self.input_string = '''This is a test string'''
        self.speaker_embeddings_dict_path = '''speaker_embeddings_path.json'''
        self.speaker_embeddings_directory = '''speaker_embeddings'''

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='''(BOS)''', eos_token='''(EOS)''')
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token='''(BOS)''',
            eos_token='''(EOS)''',
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            '''semantic_prompt''': np.ones(seq_len),
            '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len)),
            '''fine_prompt''': np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs['''history_prompt''']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        filepath = os.path.join(self.tmpdirname, '''file.npz''')
        np.savez(filepath, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=filepath)
        processed_voice_preset = inputs['''history_prompt''']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        # tokenizer kwargs below are reconstructed to mirror the processor's
        # internal call; treat the boolean values as assumptions
        encoded_tok = tokenizer(
            self.input_string,
            padding='''max_length''',
            max_length=2_56,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
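# Hedged sketch of the npz round-trip exercised in test_speaker_embeddings
# above: a Bark voice preset is just a dict of numpy arrays, which np.savez
# writes and np.load reads back by key.
import os
import tempfile

import numpy as np

preset = {'semantic_prompt': np.ones(35), 'coarse_prompt': np.ones((2, 35))}
path = os.path.join(tempfile.mkdtemp(), 'preset.npz')
np.savez(path, **preset)
loaded = np.load(path)
assert all(np.array_equal(preset[k], loaded[k]) for k in preset)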
| 342
| 1
|
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
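# Hedged aside on the dtype switch in main(): token ids are non-negative and
# bounded by the vocabulary size, so when vocab_size < 2**16 they fit in
# uint16, halving the size of the pickled arrays relative to int32.
import numpy as np

vocab_size = 30522  # e.g. bert-base-uncased
ids = np.array([0, 101, 30521], dtype=np.uint16 if vocab_size < (1 << 16) else np.int32)
assert ids.itemsize == 2 and int(ids.max()) < vocab_size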
| 712
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
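# Hedged sketch of the dummy-object pattern implemented above: a metaclass
# whose attribute access raises a helpful ImportError, so a missing optional
# dependency only surfaces when the placeholder class is actually used.
# Simplified illustration, not diffusers' exact implementation.
class _RequiresTorchSde(type):
    def __getattr__(cls, name):
        raise ImportError(f'{cls.__name__} requires the `torch` and `torchsde` backends.')

class _Placeholder(metaclass=_RequiresTorchSde):
    def __init__(self, *args, **kwargs):
        raise ImportError('This placeholder requires the `torch` and `torchsde` backends.')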
| 465
| 0
|
from __future__ import annotations
def two_pointer(nums: list[int], target: int) -> list[int]:
    '''simple docstring'''
    i = 0
    j = len(nums) - 1
    while i < j:
        if nums[i] + nums[j] == target:
            return [i, j]
        elif nums[i] + nums[j] < target:
            i = i + 1
        else:
            j = j - 1
    return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{two_pointer([2, 7, 11, 15], 9) = }""")
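# Note (hedged): the two-pointer scan above assumes `nums` is sorted in
# ascending order; on unsorted input it can miss valid pairs. For unsorted
# input, a hash map gives the same O(n) time:
def two_sum_unsorted(nums: list[int], target: int) -> list[int]:
    seen: dict[int, int] = {}
    for idx, value in enumerate(nums):
        if target - value in seen:
            return [seen[target - value], idx]
        seen[value] = idx
    return []

assert two_sum_unsorted([11, 2, 15, 7], 9) == [1, 3]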
| 30
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
__A : List[Any] = logging.get_logger(__name__)
__A : Optional[int] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
__A : List[Any] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def read_txt_into_dict(filename):
    '''simple docstring'''
    result = {}
    with open(filename, '''r''') as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
for attribute in key.split('''.''' ):
UpperCAmelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(UpperCamelCase__ ):
UpperCAmelCase = PARAM_MAPPING[full_name.split('''.''' )[-1]]
UpperCAmelCase = '''param'''
if weight_type is not None and weight_type != "param":
UpperCAmelCase = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
elif weight_type is not None and weight_type == "param":
UpperCAmelCase = hf_pointer
for attribute in hf_param_name.split('''.''' ):
UpperCAmelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = shape_pointer.shape
# let's reduce dimension
UpperCAmelCase = value[0]
else:
UpperCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
UpperCAmelCase = value
elif weight_type == "weight_g":
UpperCAmelCase = value
elif weight_type == "weight_v":
UpperCAmelCase = value
elif weight_type == "bias":
UpperCAmelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split('''.''' ):
UpperCAmelCase = getattr(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = value
else:
UpperCAmelCase = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(UpperCamelCase__ ):
UpperCAmelCase = PARAM_MAPPING[full_name.split('''.''' )[-1]]
UpperCAmelCase = '''param'''
if weight_type is not None and weight_type != "param":
UpperCAmelCase = '''.'''.join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
UpperCAmelCase = '''.'''.join([key, hf_param_name] )
else:
UpperCAmelCase = key
UpperCAmelCase = value if '''lm_head''' in full_key else value[0]
__A : str = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase = False
for key, mapped_key in MAPPING.items():
UpperCAmelCase = '''wav2vec2.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
UpperCAmelCase = True
if "*" in mapped_key:
UpperCAmelCase = name.split(UpperCamelCase__ )[0].split('''.''' )[-2]
UpperCAmelCase = mapped_key.replace('''*''' , UpperCamelCase__ )
if "weight_g" in name:
UpperCAmelCase = '''weight_g'''
elif "weight_v" in name:
UpperCAmelCase = '''weight_v'''
elif "bias" in name:
UpperCAmelCase = '''bias'''
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase = '''weight'''
else:
UpperCAmelCase = None
if hf_dict is not None:
rename_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
else:
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return is_used
return is_used
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
UpperCAmelCase = []
UpperCAmelCase = fairseq_model.state_dict()
UpperCAmelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == '''group''' , )
UpperCAmelCase = True
else:
UpperCAmelCase = load_wavaveca_layer(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
'''simple docstring'''
UpperCAmelCase = full_name.split('''conv_layers.''' )[-1]
UpperCAmelCase = name.split('''.''' )
UpperCAmelCase = int(items[0] )
UpperCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
UpperCAmelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
UpperCAmelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
UpperCAmelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
UpperCAmelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase__ )
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=True , UpperCamelCase__=False ) -> Union[str, Any]:
'''simple docstring'''
if config_path is not None:
UpperCAmelCase = WavaVecaConfig.from_pretrained(UpperCamelCase__ )
else:
UpperCAmelCase = WavaVecaConfig()
if is_seq_class:
UpperCAmelCase = read_txt_into_dict(UpperCamelCase__ )
UpperCAmelCase = idalabel
UpperCAmelCase = WavaVecaForSequenceClassification(UpperCamelCase__ )
UpperCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , )
feature_extractor.save_pretrained(UpperCamelCase__ )
elif is_finetuned:
if dict_path:
UpperCAmelCase = Dictionary.load(UpperCamelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase = target_dict.pad_index
UpperCAmelCase = target_dict.bos_index
UpperCAmelCase = target_dict.eos_index
UpperCAmelCase = len(target_dict.symbols )
UpperCAmelCase = os.path.join(UpperCamelCase__ , '''vocab.json''' )
if not os.path.isdir(UpperCamelCase__ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(UpperCamelCase__ ) )
return
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
UpperCAmelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase = 0
UpperCAmelCase = 1
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
UpperCAmelCase = WavaVecaCTCTokenizer(
UpperCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=UpperCamelCase__ , )
UpperCAmelCase = True if config.feat_extract_norm == '''layer''' else False
UpperCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , )
UpperCAmelCase = WavaVecaProcessor(feature_extractor=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
UpperCAmelCase = WavaVecaForCTC(UpperCamelCase__ )
else:
UpperCAmelCase = WavaVecaForPreTraining(UpperCamelCase__ )
if is_finetuned or is_seq_class:
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
UpperCAmelCase = argparse.Namespace(task='''audio_pretraining''' )
UpperCAmelCase = fairseq.tasks.setup_task(UpperCamelCase__ )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=UpperCamelCase__ )
UpperCAmelCase = model[0].eval()
recursively_load_weights(UpperCamelCase__ , UpperCamelCase__ , not is_finetuned )
hf_wavavec.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__A : Tuple = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
__A : Dict = parser.parse_args()
__A : Any = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
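# Hedged sketch of the "*" wildcard handling in load_wavaveca_layer above:
# fairseq parameter names carry a concrete layer index, the MAPPING values
# carry "*", and the index parsed out of the source name is spliced in.
name = 'encoder.layers.7.self_attn.k_proj.weight'
mapped_key = 'encoder.layers.*.attention.k_proj'
layer_index = name.split('self_attn')[0].split('.')[-2]  # '7'
print(mapped_key.replace('*', layer_index))  # encoder.layers.7.attention.k_proj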
| 130
| 0
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        """simple docstring"""
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])
@property
def UpperCAmelCase_ (self : Union[str, Any] ) -> int:
"""simple docstring"""
return self.base_dist.mean * self.scale + self.loc
@property
def UpperCAmelCase_ (self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.base_dist.variance * self.scale**2
@property
def UpperCAmelCase_ (self : Any ) -> str:
"""simple docstring"""
return self.variance.sqrt()
class lowerCamelCase__ ( nn.Module ):
def __init__(self : Any , _snake_case : int , _snake_case : Dict[str, int] , _snake_case : Callable[..., Tuple[torch.Tensor]] , **_snake_case : int ) -> None:
"""simple docstring"""
super().__init__(**A_ )
lowerCamelCase_ : Optional[int] = args_dim
lowerCamelCase_ : List[str] = nn.ModuleList([nn.Linear(A_ , A_ ) for dim in args_dim.values()] )
lowerCamelCase_ : Union[str, Any] = domain_map
def UpperCAmelCase_ (self : str , _snake_case : torch.Tensor ) -> Tuple[torch.Tensor]:
"""simple docstring"""
lowerCamelCase_ : List[str] = [proj(A_ ) for proj in self.proj]
return self.domain_map(*A_ )
class lowerCamelCase__ ( nn.Module ):
def __init__(self : List[Any] , _snake_case : Union[str, Any] ) -> int:
"""simple docstring"""
super().__init__()
lowerCamelCase_ : int = function
def UpperCAmelCase_ (self : Dict , _snake_case : Dict , *_snake_case : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
return self.function(A_ , *A_ )
class lowerCamelCase__ :
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None,
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        # Shape of each individual event sampled from the distribution.
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        # A value that lies inside the support of the distribution (used e.g. for padding).
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        # Projection layer mapping network outputs to the raw distribution parameters.
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        # Maps raw parameters into the valid domain of the distribution; implemented by subclasses.
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        # Smooth map from the reals to the positive reals; an algebraic alternative to softplus.
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
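# A minimal usage sketch (illustrative only, not part of the original module): mapping
# raw network outputs to a Student-T head with the classes defined above.
#
#   dist_output = StudentTOutput(dim=1)
#   projection = dist_output.get_parameter_projection(in_features=32)
#   raw = torch.randn(8, 32)                    # e.g. a model's last hidden state
#   distr_args = projection(raw)                # (df, loc, scale) mapped into the valid domain
#   student_t = dist_output.distribution(distr_args)
#   loss = -student_t.log_prob(torch.randn(8))  # negative log-likelihood training target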
def is_balanced(s: str) -> bool:
    # Checks that every opening bracket is closed by the matching bracket, in order.
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
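# Quick sanity checks (illustrative, not part of the original script):
#   is_balanced("{[()]}")  -> True
#   is_balanced("{[(])}")  -> False  (interleaved brackets)
#   is_balanced("((")      -> False  (unclosed brackets remain on the stack)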
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    """
    Sorts the input list in ascending order using pigeonhole sort.

    >>> pigeon_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    >>> pigeon_sort([])
    []
    >>> pigeon_sort([-2, -5, -45])
    [-45, -5, -2]
    """
    if len(array) == 0:
        return array

    _min, _max = min(array), max(array)

    # Compute the number of holes needed to cover the value range.
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range

    # Place every value into its hole, counting duplicates.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1

    # Rebuild the array by replaying the holes in order.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1

    # Returns the sorted array.
    return array


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
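# Note: pigeonhole sort runs in O(n + range) time and O(range) extra space, where
# range = max - min + 1, so it only pays off when the spread of values is close to
# the number of elements being sorted.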
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
},
'tokenizer_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False,
        bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>",
        cls_token="[CLS]", mask_token="[MASK]", **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space,
            keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
            sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        # An ALBERT sequence has the format `[CLS] X [SEP]` or `[CLS] A [SEP] B [SEP]`.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
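# Illustrative usage (the checkpoint id is one of the pretrained ids mapped above,
# not something this module loads by itself):
#   tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#   tokenizer("Hello world")["input_ids"]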
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

BART_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
    # See all BART models at https://huggingface.co/models?filter=bart
}
class BartConfig(PretrainedConfig):
    model_type = "bart"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096,
        encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16,
        encoder_layerdrop=0.0, decoder_layerdrop=0.0, activation_function="gelu", d_model=1024, dropout=0.1,
        attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False,
        use_cache=True, num_labels=3, pad_token_id=1, bos_token_id=0, eos_token_id=2, is_encoder_decoder=True,
        decoder_start_token_id=2, forced_eos_token_id=2, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id, **kwargs,
        )

        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
class BartOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs
    def generate_dummy_inputs(
        self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
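# A minimal export-preparation sketch (illustrative; the checkpoint choice and
# variable names are ours, not part of this module):
#   config = BartConfig.from_pretrained("facebook/bart-base")
#   onnx_config = BartOnnxConfig(config, task="seq2seq-lm")
#   list(onnx_config.inputs)  # ['input_ids', 'attention_mask', 'decoder_input_ids', ...]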
from collections.abc import Callable
import numpy as np
def heun_method(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    """
    Solve an ODE y' = f(x, y) with Heun's method (the explicit trapezoidal rule):
    a forward-Euler predictor followed by a trapezoidal corrector.
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        # Predictor step (forward Euler).
        y_pred = y[k] + step_size * ode_func(x, y[k])
        # Corrector step: average of the slopes at both ends of the interval.
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_pred))
        )
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
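# Example call (illustrative; `heun_method` is the name chosen for the demangled
# function above). Approximating y' = y with y(0) = 1 on [0, 1]:
#   y = heun_method(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
# y[-1] is then close to e ~ 2.71828, the exact value of y(1).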
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")


def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
                max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose,
                return_tensors=return_tensors, **kwargs,
            )
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding
    def batch_decode(self, *args, **kwargs):
        # Forwarded to the language tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the language tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    # overwritten to save the Q-Former tokenizer in a separate subfolder
    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        qformer_tokenizer_path = os.path.join(save_directory, "qformer_tokenizer")
        self.qformer_tokenizer.save_pretrained(qformer_tokenizer_path)
        return super().save_pretrained(save_directory, **kwargs)

    # overwritten to load the Q-Former tokenizer from its separate subfolder
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        qformer_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, subfolder="qformer_tokenizer")
        args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
        args.append(qformer_tokenizer)
        return cls(*args)
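# Illustrative usage (the checkpoint id and inputs are ours, not part of this module):
#   processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
#   inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
# `inputs` then carries input_ids/attention_mask, qformer_input_ids/qformer_attention_mask
# and pixel_values, matching the keys assembled in __call__ above.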
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args):
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    # Overwrites the default latents test because pix2pix encodes the image differently.
    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
        )
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained("timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3

    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
        )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''', '''False''' ) ) is not True, reason='''Skipping test because should only be run when releasing minor transformers version''', )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
# Configuration
parser = HfArgumentParser(InitializationArguments)
args = parser.parse_args()

# Load codeparrot tokenizer trained for Python code tokenization
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name)

# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
config_kwargs = {
    "vocab_size": len(tokenizer),
    "scale_attn_by_inverse_layer_idx": True,
    "reorder_and_upcast_attn": True,
}

# Load model config (GPT-2 large in this case)
config = AutoConfig.from_pretrained(args.config_name, **config_kwargs)

# Initialize new model with config
model = AutoModelForCausalLM.from_config(config)

# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
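# Typical invocation (illustrative; the exact flag names come from the
# InitializationArguments dataclass in the accompanying arguments.py, not shown here):
#   python initialize_model.py --config_name gpt2-large --model_name my-new-model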
from math import factorial

DIGIT_FACTORIAL = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """
    Sum of the factorials of the digits of `number`.

    >>> digit_factorial_sum(0)
    1
    >>> digit_factorial_sum(69)
    363600
    """
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1000000) -> int:
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError("Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater then the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution()}''')
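# This solves Project Euler problem 74: count the starting numbers below one million
# whose digit-factorial chain contains exactly sixty non-repeating terms.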
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoTokenizer,
BertConfig,
BertTokenizer,
BertTokenizerFast,
CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
PreTrainedTokenizerFast,
RobertaTokenizer,
RobertaTokenizerFast,
is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
TOKENIZER_MAPPING,
get_tokenizer_config,
tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
DUMMY_DIFF_TOKENIZER_IDENTIFIER,
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tokenizers,
slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)

    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")

    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)

            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")

    def test_model_name_edge_cases_in_mappings(self):
        # tests: https://github.com/huggingface/transformers/pull/13251
        # 1. models with `-`, e.g. xlm-roberta -> xlm_roberta
        # 2. models that don't remap 1-1 from model-name to model file, e.g., openai-gpt -> openai
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)

    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)

    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")

    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)

    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")

    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new toknizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")

    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
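# ---------------------------------------------------------------------------
# Added sketch (not part of the test file): the registration API exercised by
# the tests above, in minimal form. `MyConfig` and `MyTokenizer` are made-up
# placeholder classes, not real Transformers classes.
if __name__ == "__main__":
    from transformers import PretrainedConfig, PreTrainedTokenizer

    class MyConfig(PretrainedConfig):
        model_type = "my-model"

    class MyTokenizer(PreTrainedTokenizer):
        pass

    AutoConfig.register("my-model", MyConfig)
    AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
    # From here on, AutoTokenizer.from_pretrained() resolves any checkpoint
    # whose config has model_type == "my-model" to MyTokenizer.
    assert TOKENIZER_MAPPING[MyConfig] == (MyTokenizer, None)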
| 427
| 1
|
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}


class OwlViTTextConfig(PretrainedConfig):
    r"""Configuration for the OWL-ViT text encoder."""

    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTVisionConfig(PretrainedConfig):
    r"""Configuration for the OWL-ViT vision encoder."""

    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class OwlViTConfig(PretrainedConfig):
    r"""Configuration for the full OWL-ViT model (text plus vision)."""

    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config

        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
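# ---------------------------------------------------------------------------
# Added usage sketch (not from the source): compose a full OWL-ViT config
# from plain sub-config dicts and round-trip it through `to_dict`.
if __name__ == "__main__":
    config = OwlViTConfig.from_text_vision_configs(
        text_config={"hidden_size": 512}, vision_config={"hidden_size": 768, "patch_size": 32}
    )
    assert config.text_config.hidden_size == 512
    assert config.vision_config.patch_size == 32
    assert config.to_dict()["model_type"] == "owlvit"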
| 14
|
"""simple docstring"""
class lowercase_ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : int ):
_A = None
_A = None
_A = graph
self._normalize_graph(_UpperCAmelCase , _UpperCAmelCase )
_A = len(_UpperCAmelCase )
_A = None
def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict ):
if sources is int:
_A = [sources]
if sinks is int:
_A = [sinks]
if len(_UpperCAmelCase ) == 0 or len(_UpperCAmelCase ) == 0:
return
_A = sources[0]
_A = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(_UpperCAmelCase ) > 1 or len(_UpperCAmelCase ) > 1:
_A = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
_A = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
_A = max_input_flow
_A = 0
_A = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
_A = max_input_flow
_A = size - 1
def lowerCAmelCase_ ( self : Optional[Any] ):
if self.maximum_flow_algorithm is None:
raise Exception('You need to set maximum flow algorithm before.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Union[str, Any] ):
_A = algorithm(self )
class lowercase_ :
'''simple docstring'''
def __init__( self : List[Any] , _UpperCAmelCase : Union[str, Any] ):
_A = flow_network
_A = flow_network.verticesCount
_A = flow_network.sourceIndex
_A = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
_A = flow_network.graph
_A = False
def lowerCAmelCase_ ( self : Optional[Any] ):
if not self.executed:
self._algorithm()
_A = True
def lowerCAmelCase_ ( self : int ):
pass
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : int , _UpperCAmelCase : Any ):
super().__init__(_UpperCAmelCase )
# use this to save your result
_A = -1
def lowerCAmelCase_ ( self : Optional[Any] ):
if not self.executed:
raise Exception('You should execute algorithm before using its result!' )
return self.maximum_flow
class lowercase_ ( __lowerCAmelCase ):
'''simple docstring'''
def __init__( self : Dict , _UpperCAmelCase : List[Any] ):
super().__init__(_UpperCAmelCase )
_A = [[0] * self.verticies_count for i in range(self.verticies_count )]
_A = [0] * self.verticies_count
_A = [0] * self.verticies_count
def lowerCAmelCase_ ( self : Dict ):
_A = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
_A = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
_A = 0
while i < len(_UpperCAmelCase ):
_A = vertices_list[i]
_A = self.heights[vertex_index]
self.process_vertex(_UpperCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(_UpperCAmelCase ) )
_A = 0
else:
i += 1
_A = sum(self.preflow[self.source_index] )
def lowerCAmelCase_ ( self : int , _UpperCAmelCase : Any ):
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(_UpperCAmelCase , _UpperCAmelCase )
self.relabel(_UpperCAmelCase )
def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple ):
_A = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def lowerCAmelCase_ ( self : Union[str, Any] , _UpperCAmelCase : int ):
_A = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
_A = self.heights[to_index]
if min_height is not None:
_A = min_height + 1
if __name__ == "__main__":
a = [0]
a = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
a = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
# prepare our network
a = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
a = flow_network.find_maximum_flow()
print(F'''maximum flow is {maximum_flow}''')
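    # Added sanity check (not from the source): the only augmenting path in
    # the graph above is 0 -> 1 -> 2 -> 3 with capacities 7, 6 and 8, so the
    # bottleneck, and therefore the maximum flow, should be 6.
    assert maximum_flow == 6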
| 7
| 0
|
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechT5FeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch


global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechT5FeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
class SpeechT5FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechT5FeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechT5FeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_zero_mean_unit_variance_normalization_np(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]
        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, padding=padding, max_length=max_length, return_tensors="np")
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self.assertTrue(input_values[0][800:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self.assertTrue(input_values[0][1000:].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lengths = range(800, 1400, 200)
        speech_inputs = [floats_list((1, x))[0] for x in lengths]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 1600, None]

        for max_length, padding in zip(max_lengths, paddings):
            processed = feat_extract(speech_inputs, max_length=max_length, padding=padding)
            input_values = processed.input_values

            self._check_zero_mean_unit_variance(input_values[0][:800])
            self._check_zero_mean_unit_variance(input_values[1][:1000])
            self._check_zero_mean_unit_variance(input_values[2][:1200])

    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="max_length", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1])
        self._check_zero_mean_unit_variance(input_values[2])

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=1000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertTrue(input_values.shape == (3, 1000))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        processed = feat_extract(
            speech_inputs, truncation=True, max_length=2000, padding="longest", return_tensors="np"
        )
        input_values = processed.input_values

        self._check_zero_mean_unit_variance(input_values[0, :800])
        self._check_zero_mean_unit_variance(input_values[1, :1000])
        self._check_zero_mean_unit_variance(input_values[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertTrue(input_values.shape == (3, 1200))

    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def test_call_target(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_values = feature_extractor(audio_target=np_speech_inputs, padding=True, return_tensors="np").input_values
        self.assertTrue(input_values.ndim == 3)
        self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_batch_feature_target(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_batch_feature_target_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.num_mel_bins)
        )

    @require_torch
    def test_padding_accepts_tensors_target_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
        input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    def test_attention_mask_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
        self.assertIn("attention_mask", processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation_target(self):
        feat_dict = self.feat_extract_dict
        feat_dict["return_attention_mask"] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_target()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]

        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        feat_extract.feature_size = feat_extract.num_mel_bins  # hack!

        processed_pad = feat_extract.pad(
            processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
        )
        self.assertIn("attention_mask", processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
        )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
        )

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [2.3804e-03, 2.0752e-03, 1.9836e-03, 2.1057e-03, 1.6174e-03,
             3.0518e-04, 9.1553e-05, 3.3569e-04, 9.7656e-04, 1.8311e-03,
             2.0142e-03, 2.1057e-03, 1.7395e-03, 4.5776e-04, -3.9673e-04,
             4.5776e-04, 1.0071e-03, 9.1553e-05, 4.8828e-04, 1.1597e-03,
             7.3242e-04, 9.4604e-04, 1.8005e-03, 1.8311e-03, 8.8501e-04,
             4.2725e-04, 4.8828e-04, 7.3242e-04, 1.0986e-03, 2.1057e-03]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 93680))
        self.assertTrue(torch.allclose(input_values[0, :30], EXPECTED_INPUT_VALUES, atol=1e-6))

    def test_integration_target(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
             -3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
             -3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
             -3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = SpeechT5FeatureExtractor()
        input_values = feature_extractor(audio_target=input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 366, 80))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
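# ---------------------------------------------------------------------------
# Added usage sketch (not part of the test file): run the feature extractor
# on one second of synthetic audio. Exact frame counts depend on the
# extractor's hop length, so only the fixed dimensions are asserted.
if __name__ == "__main__":
    extractor = SpeechT5FeatureExtractor()
    waveform = np.random.uniform(-1, 1, size=16000).astype(np.float32)

    inputs = extractor(waveform, sampling_rate=16000, return_tensors="np")
    assert inputs.input_values.shape[0] == 1  # one zero-mean/unit-variance waveform

    targets = extractor(audio_target=waveform, sampling_rate=16000, return_tensors="np")
    assert targets.input_values.shape[-1] == 80  # num_mel_bins log-mel features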
| 717
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_table_transformer import (
        TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TableTransformerConfig,
        TableTransformerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_table_transformer import (
            TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TableTransformerForObjectDetection,
            TableTransformerModel,
            TableTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
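# Added note (not from the source): with `_LazyModule` installed in
# `sys.modules`, the heavy torch-backed submodule is only imported when one of
# its exported names is actually touched, e.g.:
#
#     from transformers import TableTransformerConfig  # config module only
#     from transformers import TableTransformerModel   # now loads the torch code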
| 180
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : Tuple = logging.get_logger(__name__)
snake_case_ : Dict = {
"""bigcode/gpt_bigcode-santacoder""": """https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json""",
}
class snake_case__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = '''gpt_bigcode'''
SCREAMING_SNAKE_CASE__ = ['''past_key_values''']
SCREAMING_SNAKE_CASE__ = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Optional[int] , lowercase : List[str]=5_02_57 , lowercase : Dict=10_24 , lowercase : Dict=7_68 , lowercase : int=12 , lowercase : int=12 , lowercase : Optional[int]=None , lowercase : List[Any]="gelu_pytorch_tanh" , lowercase : int=0.1 , lowercase : str=0.1 , lowercase : Optional[Any]=0.1 , lowercase : Any=1E-5 , lowercase : List[Any]=0.0_2 , lowercase : Dict=True , lowercase : Dict=True , lowercase : Optional[Any]=5_02_56 , lowercase : List[str]=5_02_56 , lowercase : Any=True , lowercase : Any=True , lowercase : List[Any]=True , **lowercase : Any , ):
'''simple docstring'''
UpperCAmelCase : Dict = vocab_size
UpperCAmelCase : Union[str, Any] = n_positions
UpperCAmelCase : Union[str, Any] = n_embd
UpperCAmelCase : Dict = n_layer
UpperCAmelCase : List[str] = n_head
UpperCAmelCase : Optional[Any] = n_inner
UpperCAmelCase : int = activation_function
UpperCAmelCase : Optional[int] = resid_pdrop
UpperCAmelCase : int = embd_pdrop
UpperCAmelCase : List[Any] = attn_pdrop
UpperCAmelCase : Tuple = layer_norm_epsilon
UpperCAmelCase : int = initializer_range
UpperCAmelCase : str = scale_attn_weights
UpperCAmelCase : List[Any] = use_cache
UpperCAmelCase : Union[str, Any] = attention_softmax_in_fpaa
UpperCAmelCase : str = scale_attention_softmax_in_fpaa
UpperCAmelCase : Tuple = multi_query
UpperCAmelCase : Any = bos_token_id
UpperCAmelCase : Union[str, Any] = eos_token_id
super().__init__(bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
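# ---------------------------------------------------------------------------
# Added usage sketch (not from the source): build a small config and check
# the attribute map, which exposes `hidden_size` as an alias for `n_embd`.
if __name__ == "__main__":
    config = GPTBigCodeConfig(vocab_size=1000, n_embd=64, n_layer=2, n_head=4)
    assert config.hidden_size == 64  # resolved through attribute_map
    assert config.num_hidden_layers == 2
    assert config.multi_query is True  # multi-query attention is on by default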
| 595
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case_ : str = logging.get_logger(__name__)
snake_case_ : Any = {
"""google/vivit-b-16x2-kinetics400""": (
"""https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"""
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class snake_case__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = '''vivit'''
def __init__( self : Union[str, Any] , lowercase : int=2_24 , lowercase : Tuple=32 , lowercase : str=[2, 16, 16] , lowercase : str=3 , lowercase : Dict=7_68 , lowercase : Union[str, Any]=12 , lowercase : List[Any]=12 , lowercase : Dict=30_72 , lowercase : int="gelu_fast" , lowercase : Dict=0.0 , lowercase : Dict=0.0 , lowercase : List[str]=0.0_2 , lowercase : Tuple=1E-06 , lowercase : Any=True , **lowercase : Union[str, Any] , ):
'''simple docstring'''
UpperCAmelCase : List[str] = hidden_size
UpperCAmelCase : int = num_hidden_layers
UpperCAmelCase : int = num_attention_heads
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : List[str] = hidden_act
UpperCAmelCase : Dict = hidden_dropout_prob
UpperCAmelCase : Dict = attention_probs_dropout_prob
UpperCAmelCase : Dict = initializer_range
UpperCAmelCase : Any = layer_norm_eps
UpperCAmelCase : List[Any] = image_size
UpperCAmelCase : str = num_frames
UpperCAmelCase : str = tubelet_size
UpperCAmelCase : int = num_channels
UpperCAmelCase : Optional[int] = qkv_bias
super().__init__(**lowercase )
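# ---------------------------------------------------------------------------
# Added usage sketch (not from the source): ViViT tokenizes video with 3-D
# tubelets, so the sequence length follows from the defaults below.
if __name__ == "__main__":
    config = VivitConfig(image_size=224, num_frames=32, tubelet_size=[2, 16, 16])
    tubelets = (config.num_frames // config.tubelet_size[0]) * (
        config.image_size // config.tubelet_size[1]
    ) ** 2
    assert tubelets == 3136  # 16 temporal slots x 14 x 14 spatial patches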
| 595
| 1
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
UpperCAmelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( _UpperCamelCase ):
__a = ["pixel_values"]
def __init__( self : int , _lowerCamelCase : bool = True , _lowerCamelCase : Dict[str, int] = None , _lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , _lowerCamelCase : bool = True , _lowerCamelCase : Union[int, float] = 1 / 255 , _lowerCamelCase : bool = True , _lowerCamelCase : Optional[Union[float, List[float]]] = None , _lowerCamelCase : Optional[Union[float, List[float]]] = None , _lowerCamelCase : bool = True , **_lowerCamelCase : str , ):
super().__init__(**__a )
_snake_case = size if size is not None else {"height": 384, "width": 384}
_snake_case = get_size_dict(__a , default_to_square=__a )
_snake_case = do_resize
_snake_case = size
_snake_case = resample
_snake_case = do_rescale
_snake_case = rescale_factor
_snake_case = do_normalize
_snake_case = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_snake_case = image_std if image_std is not None else OPENAI_CLIP_STD
_snake_case = do_convert_rgb
def lowercase ( self : Optional[int] , _lowerCamelCase : np.ndarray , _lowerCamelCase : Dict[str, int] , _lowerCamelCase : PILImageResampling = PILImageResampling.BICUBIC , _lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCamelCase : List[str] , ):
_snake_case = get_size_dict(__a , default_to_square=__a )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
_snake_case = (size["height"], size["width"])
return resize(__a , size=__a , resample=__a , data_format=__a , **__a )
def lowercase ( self : Optional[int] , _lowerCamelCase : np.ndarray , _lowerCamelCase : Union[int, float] , _lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCamelCase : List[Any] , ):
return rescale(__a , scale=__a , data_format=__a , **__a )
def lowercase ( self : List[str] , _lowerCamelCase : np.ndarray , _lowerCamelCase : Union[float, List[float]] , _lowerCamelCase : Union[float, List[float]] , _lowerCamelCase : Optional[Union[str, ChannelDimension]] = None , **_lowerCamelCase : Tuple , ):
return normalize(__a , mean=__a , std=__a , data_format=__a , **__a )
def lowercase ( self : Union[str, Any] , _lowerCamelCase : ImageInput , _lowerCamelCase : Optional[bool] = None , _lowerCamelCase : Optional[Dict[str, int]] = None , _lowerCamelCase : PILImageResampling = None , _lowerCamelCase : Optional[bool] = None , _lowerCamelCase : Optional[float] = None , _lowerCamelCase : Optional[bool] = None , _lowerCamelCase : Optional[Union[float, List[float]]] = None , _lowerCamelCase : Optional[Union[float, List[float]]] = None , _lowerCamelCase : Optional[Union[str, TensorType]] = None , _lowerCamelCase : bool = None , _lowerCamelCase : ChannelDimension = ChannelDimension.FIRST , **_lowerCamelCase : Tuple , ):
_snake_case = do_resize if do_resize is not None else self.do_resize
_snake_case = resample if resample is not None else self.resample
_snake_case = do_rescale if do_rescale is not None else self.do_rescale
_snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor
_snake_case = do_normalize if do_normalize is not None else self.do_normalize
_snake_case = image_mean if image_mean is not None else self.image_mean
_snake_case = image_std if image_std is not None else self.image_std
_snake_case = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_snake_case = size if size is not None else self.size
_snake_case = get_size_dict(__a , default_to_square=__a )
_snake_case = make_list_of_images(__a )
if not valid_images(__a ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_snake_case = [convert_to_rgb(__a ) for image in images]
# All transformations expect numpy arrays.
_snake_case = [to_numpy_array(__a ) for image in images]
if do_resize:
_snake_case = [self.resize(image=__a , size=__a , resample=__a ) for image in images]
if do_rescale:
_snake_case = [self.rescale(image=__a , scale=__a ) for image in images]
if do_normalize:
_snake_case = [self.normalize(image=__a , mean=__a , std=__a ) for image in images]
_snake_case = [to_channel_dimension_format(__a , __a ) for image in images]
_snake_case = BatchFeature(data={'''pixel_values''': images} , tensor_type=__a )
return encoded_outputs
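# ---------------------------------------------------------------------------
# Added usage sketch (not from the source): preprocess one random RGB image;
# with the default 384x384 size the batch has shape (1, 3, 384, 384).
if __name__ == "__main__":
    from PIL import Image

    processor = BlipImageProcessor()
    image = Image.fromarray((np.random.rand(480, 640, 3) * 255).astype(np.uint8))
    batch = processor(images=image, return_tensors="np")
    assert batch["pixel_values"].shape == (1, 3, 384, 384)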
| 707
|
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase__ = logging.get_logger(__name__)
def _UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : str ) -> List[Any]:
_snake_case = os.path.abspath(__lowerCamelCase )
logger.info(f'''Converting TensorFlow checkpoint from {tf_path}''' )
# Load weights from TF model
_snake_case = tf.train.list_variables(__lowerCamelCase )
_snake_case = []
_snake_case = []
_snake_case = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
_snake_case = full_name.split('''/''' )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(f'''Skipping non-model layer {full_name}''' )
continue
if "optimizer" in full_name:
logger.info(f'''Skipping optimization layer {full_name}''' )
continue
if name[0] == "model":
# ignore initial 'model'
_snake_case = name[1:]
# figure out how many levels deep the name is
_snake_case = 0
for _name in name:
if _name.startswith('''layer_with_weights''' ):
depth += 1
else:
break
layer_depth.append(__lowerCamelCase )
# read data
_snake_case = tf.train.load_variable(__lowerCamelCase , __lowerCamelCase )
names.append('''/'''.join(__lowerCamelCase ) )
arrays.append(__lowerCamelCase )
logger.info(f'''Read a total of {len(__lowerCamelCase ):,} layers''' )
# Sanity check
if len(set(__lowerCamelCase ) ) != 1:
raise ValueError(f'''Found layer names with different depths (layer depth {list(set(__lowerCamelCase ) )})''' )
_snake_case = list(set(__lowerCamelCase ) )[0]
if layer_depth != 1:
raise ValueError(
'''The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'''
''' heads.''' )
# convert layers
logger.info('''Converting weights...''' )
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output dense
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.append("output")
                pointer = getattr(pointer, "output")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
return model
def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)
    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
        '--tf_checkpoint_path', type=str, required=True, help='Path to the TensorFlow 2.x checkpoint.'
)
parser.add_argument(
'--bert_config_file',
type=str,
required=True,
help='The config json file corresponding to the BERT model. This specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path',
type=str,
required=True,
help='Path to the output PyTorch model (must include filename).',
)
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
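
# Example invocation; the script filename and all paths are placeholders, while the flags
# mirror the argparse definitions above:
#
#     python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/tf2_checkpoint \
#         --bert_config_file /path/to/bert_config.json \
#         --pytorch_dump_path /path/to/pytorch_model.bin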
| 430
| 0
|
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
"""
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
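
# A minimal usage sketch, assuming the class names above; the budgets are arbitrary:
#
#     criteria = StoppingCriteriaList(
#         [MaxLengthCriteria(max_length=50), MaxTimeCriteria(max_time=2.0)]
#     )
#     # generation stops as soon as any criterion returns True:
#     # should_stop = criteria(input_ids, scores)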
| 317
|
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone, pretrained=pretrained, features_only=config.features_only, in_chans=config.num_channels, out_indices=out_indices, **kwargs, )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path, num_channels=num_channels, features_only=features_only, use_pretrained_backbone=use_pretrained_backbone, out_indices=out_indices, )
        return super()._from_config(config, **kwargs)
    def _init_weights(self, module):
        """Empty init-weights hook to keep the class compatible with the library."""
        pass
    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
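
# A minimal usage sketch, assuming the public `transformers` entry points for this class;
# "resnet18" is an arbitrary timm model id, and use_pretrained_backbone=False avoids a
# weight download:
#
#     config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#     model = TimmBackbone(config)
#     feature_maps = model(pixel_values).feature_maps  # tuple with one map per out_index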
| 317
| 1
|
from __future__ import annotations
import math
import random
from typing import Any
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : list[Any] = []
__UpperCAmelCase : int = 0
__UpperCAmelCase : int = 0
def snake_case__ ( self : int ):
'''simple docstring'''
return self.head == self.tail
def snake_case__ ( self : Optional[int] , a_ : Any ):
'''simple docstring'''
self.data.append(a_ )
__UpperCAmelCase : int = self.tail + 1
def snake_case__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = self.data[self.head]
__UpperCAmelCase : List[Any] = self.head + 1
return ret
def snake_case__ ( self : Any ):
'''simple docstring'''
return self.tail - self.head
def snake_case__ ( self : Optional[int] ):
'''simple docstring'''
print(self.data )
print('''**************''' )
print(self.data[self.head : self.tail] )
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self : Optional[Any] , a_ : Any ):
'''simple docstring'''
__UpperCAmelCase : Dict = data
__UpperCAmelCase : MyNode | None = None
__UpperCAmelCase : MyNode | None = None
__UpperCAmelCase : int = 1
def snake_case__ ( self : List[str] ):
'''simple docstring'''
return self.data
def snake_case__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.left
def snake_case__ ( self : str ):
'''simple docstring'''
return self.right
def snake_case__ ( self : Optional[int] ):
'''simple docstring'''
return self.height
def snake_case__ ( self : List[str] , a_ : Any ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = data
def snake_case__ ( self : List[str] , a_ : MyNode | None ):
'''simple docstring'''
__UpperCAmelCase : Tuple = node
def snake_case__ ( self : List[str] , a_ : MyNode | None ):
'''simple docstring'''
__UpperCAmelCase : int = node
def snake_case__ ( self : int , a_ : int ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = height
def get_height(node: MyNode | None) -> int:
    if node is None:
        return 0
    return node.get_height()


def my_max(a: int, b: int) -> int:
    if a > b:
        return a
    return b
def right_rotation(node: MyNode) -> MyNode:
    print("left rotation node:", node.get_data())
    ret = node.get_left()
    assert ret is not None
    node.set_left(ret.get_right())
    ret.set_right(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def left_rotation(node: MyNode) -> MyNode:
    print("right rotation node:", node.get_data())
    ret = node.get_right()
    assert ret is not None
    node.set_right(ret.get_left())
    ret.set_left(node)
    h1 = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h1)
    h2 = my_max(get_height(ret.get_right()), get_height(ret.get_left())) + 1
    ret.set_height(h2)
    return ret


def lr_rotation(node: MyNode) -> MyNode:
    left_child = node.get_left()
    assert left_child is not None
    node.set_left(left_rotation(left_child))
    return right_rotation(node)


def rl_rotation(node: MyNode) -> MyNode:
    right_child = node.get_right()
    assert right_child is not None
    node.set_right(right_rotation(right_child))
    return left_rotation(node)
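
# Rebalancing cheat-sheet for the four helpers above, matching how insert_node and
# del_node below use them:
#   left-left   imbalance -> right_rotation(node)
#   left-right  imbalance -> lr_rotation(node)   (left-rotate the left child, then right-rotate node)
#   right-right imbalance -> left_rotation(node)
#   right-left  imbalance -> rl_rotation(node)   (right-rotate the right child, then left-rotate node)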
def insert_node(node: MyNode | None, data: Any) -> MyNode | None:
    if node is None:
        return MyNode(data)
    if data < node.get_data():
        node.set_left(insert_node(node.get_left(), data))
        if (
            get_height(node.get_left()) - get_height(node.get_right()) == 2
        ):  # an unbalance detected
            left_child = node.get_left()
            assert left_child is not None
            if (
                data < left_child.get_data()
            ):  # new node is the left child of the left child
                node = right_rotation(node)
            else:
                node = lr_rotation(node)
    else:
        node.set_right(insert_node(node.get_right(), data))
        if get_height(node.get_right()) - get_height(node.get_left()) == 2:
            right_child = node.get_right()
            assert right_child is not None
            if data < right_child.get_data():
                node = rl_rotation(node)
            else:
                node = left_rotation(node)
    h = my_max(get_height(node.get_right()), get_height(node.get_left())) + 1
    node.set_height(h)
    return node
def get_right_most(root: MyNode) -> Any:
    while True:
        right_child = root.get_right()
        if right_child is None:
            break
        root = right_child
    return root.get_data()


def get_left_most(root: MyNode) -> Any:
    while True:
        left_child = root.get_left()
        if left_child is None:
            break
        root = left_child
    return root.get_data()
def del_node(root: MyNode, data: Any) -> MyNode | None:
    left_child = root.get_left()
    right_child = root.get_right()
    if root.get_data() == data:
        if left_child is not None and right_child is not None:
            temp_data = get_left_most(right_child)
            root.set_data(temp_data)
            root.set_right(del_node(right_child, temp_data))
        elif left_child is not None:
            root = left_child
        elif right_child is not None:
            root = right_child
        else:
            return None
    elif root.get_data() > data:
        if left_child is None:
            print("No such data")
            return root
        else:
            root.set_left(del_node(left_child, data))
    else:  # root.get_data() < data
        if right_child is None:
            return root
        else:
            root.set_right(del_node(right_child, data))
    if get_height(right_child) - get_height(left_child) == 2:
        assert right_child is not None
        if get_height(right_child.get_right()) > get_height(right_child.get_left()):
            root = left_rotation(root)
        else:
            root = rl_rotation(root)
    elif get_height(right_child) - get_height(left_child) == -2:
        assert left_child is not None
        if get_height(left_child.get_left()) > get_height(left_child.get_right()):
            root = right_rotation(root)
        else:
            root = lr_rotation(root)
    height = my_max(get_height(root.get_right()), get_height(root.get_left())) + 1
    root.set_height(height)
    return root
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : MyNode | None = None
def snake_case__ ( self : int ):
'''simple docstring'''
return get_height(self.root )
def snake_case__ ( self : Any , a_ : Any ):
'''simple docstring'''
print('''insert:''' + str(a_ ) )
__UpperCAmelCase : Optional[int] = insert_node(self.root , a_ )
def snake_case__ ( self : Dict , a_ : Any ):
'''simple docstring'''
print('''delete:''' + str(a_ ) )
if self.root is None:
print('''Tree is empty!''' )
return
__UpperCAmelCase : Optional[Any] = del_node(self.root , a_ )
    def __str__(self):  # a level traversal, gives a more intuitive look at the tree
'''simple docstring'''
__UpperCAmelCase : int = ''''''
__UpperCAmelCase : Optional[Any] = MyQueue()
q.push(self.root )
__UpperCAmelCase : str = self.get_height()
if layer == 0:
return output
__UpperCAmelCase : Optional[Any] = 0
while not q.is_empty():
__UpperCAmelCase : str = q.pop()
__UpperCAmelCase : Optional[Any] = ''' ''' * int(math.pow(2 , layer - 1 ) )
output += space
if node is None:
output += "*"
q.push(a_ )
q.push(a_ )
else:
output += str(node.get_data() )
q.push(node.get_left() )
q.push(node.get_right() )
output += space
__UpperCAmelCase : List[Any] = cnt + 1
for i in range(1_00 ):
if cnt == math.pow(2 , a_ ) - 1:
__UpperCAmelCase : int = layer - 1
if layer == 0:
output += "\n*************************************"
return output
output += "\n"
break
output += "\n*************************************"
return output
def _test() -> None:
    import doctest

    doctest.testmod()
if __name__ == "__main__":
_test()
t = AVLtree()
lst = list(range(10))
random.shuffle(lst)
for i in lst:
t.insert(i)
print(str(t))
random.shuffle(lst)
for i in lst:
t.del_node(i)
print(str(t))
| 241
|
from functools import reduce
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(f'''{solution() = }''')
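    # Hedged sanity check of the sliding-window idea, shrunk to a window of 2:
    # over "123456789" the adjacent products are 1*2, 2*3, ..., 8*9, and the best is 72.
    assert max(int(a) * int(b) for a, b in zip("123456789", "23456789")) == 72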
| 241
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()
    def forward(self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, output_hidden_states: Optional[bool] = None, ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=True if self.has_pre_transformation else output_hidden_states, return_dict=return_dict, )
        if self.has_pre_transformation:
            sequence_output2 = outputs["hidden_states"][-2]
            sequence_output2 = self.pre_LN(sequence_output2)
            projection_state2 = self.transformation_pre(sequence_output2)
            return TransformationModelOutput(
                projection_state=projection_state2, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state, last_hidden_state=outputs.last_hidden_state, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
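
# A minimal shape illustration of the projection above, with made-up sizes; both branches
# end in a Linear(hidden_size -> project_dim), the pre-transformation path just applies it
# to the layer-normed second-to-last hidden state instead of the last one:
#
#     hidden = torch.randn(2, 7, 16)          # (batch, seq_len, hidden_size)
#     projection = nn.Linear(16, 8)(hidden)   # -> (2, 7, project_dim=8)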
| 174
|
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
| 174
| 1
|
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class TransformerTemporalModelOutput(BaseOutput):
    sample: torch.FloatTensor


class TransformerTemporalModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, out_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, activation_fn: str = "geglu", norm_elementwise_affine: bool = True, double_self_attention: bool = True, ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.in_channels = in_channels
        self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
        self.proj_in = nn.Linear(in_channels, inner_dim)
        # 3. Define transformers blocks
        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim, dropout=dropout, cross_attention_dim=cross_attention_dim, activation_fn=activation_fn, attention_bias=attention_bias, double_self_attention=double_self_attention, norm_elementwise_affine=norm_elementwise_affine, )
                for d in range(num_layers)
            ]
        )
        self.proj_out = nn.Linear(inner_dim, in_channels)
    def forward(self, hidden_states, encoder_hidden_states=None, timestep=None, class_labels=None, num_frames=1, cross_attention_kwargs=None, return_dict: bool = True, ):
        batch_frames, channel, height, width = hidden_states.shape
        batch_size = batch_frames // num_frames
        residual = hidden_states
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)
        hidden_states = hidden_states.permute(0, 2, 1, 3, 4)
        hidden_states = self.norm(hidden_states)
        hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)
        hidden_states = self.proj_in(hidden_states)
        # 2. Blocks
        for block in self.transformer_blocks:
            hidden_states = block(
                hidden_states, encoder_hidden_states=encoder_hidden_states, timestep=timestep, cross_attention_kwargs=cross_attention_kwargs, class_labels=class_labels, )
        # 3. Output
        hidden_states = self.proj_out(hidden_states)
        hidden_states = (
            hidden_states[None, None, :]
            .reshape(batch_size, height, width, num_frames, channel)
            .permute(0, 3, 4, 1, 2)
            .contiguous()
        )
        hidden_states = hidden_states.reshape(batch_frames, channel, height, width)
        output = hidden_states + residual
        if not return_dict:
            return (output,)
        return TransformerTemporalModelOutput(sample=output)
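
# A hedged shape walkthrough of forward() above, with made-up sizes
# (batch=2, frames=4, channels=8, 16x16 spatial):
#   input          : (2*4, 8, 16, 16)               # frames folded into the batch dim
#   after reshape  : (2, 4, 8, 16, 16) -> permute -> (2, 8, 4, 16, 16)
#   attention view : (2*16*16, 4, inner_dim)         # each spatial token attends across frames
#   output         : (2*4, 8, 16, 16), plus the residual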
| 278
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class TimesformerConfig(PretrainedConfig):
    model_type = 'timesformer'

    def __init__(self, image_size=224, patch_size=16, num_channels=3, num_frames=8, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-6, qkv_bias=True, attention_type="divided_space_time", drop_path_rate=0, **kwargs, ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
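
# A minimal usage sketch; every argument falls back to the defaults above, so a config for
# 16-frame clips is just:
#
#     config = TimesformerConfig(num_frames=16)
#     assert config.attention_type == "divided_space_time"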
| 278
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type='hybrid')
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = 'project'
    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]
    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(name):
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__UpperCamelCase = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
__UpperCamelCase = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
__UpperCamelCase = name.replace('patch_embed' , '' )
if "pos_embed" in name:
__UpperCamelCase = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
__UpperCamelCase = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
__UpperCamelCase = name.replace('proj' , 'projection' )
if "blocks" in name:
__UpperCamelCase = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
__UpperCamelCase = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__UpperCamelCase = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name and "backbone" not in name:
__UpperCamelCase = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
__UpperCamelCase = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
__UpperCamelCase = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
__UpperCamelCase = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
__UpperCamelCase = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
__UpperCamelCase = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
__UpperCamelCase = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
__UpperCamelCase = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
__UpperCamelCase = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__UpperCamelCase = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
if "out_conv" in name:
__UpperCamelCase = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
__UpperCamelCase = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
__UpperCamelCase = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
__UpperCamelCase = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
__UpperCamelCase = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__UpperCamelCase = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
__UpperCamelCase = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
__UpperCamelCase = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
__UpperCamelCase = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__UpperCamelCase = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
__UpperCamelCase = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
__UpperCamelCase = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
__UpperCamelCase = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
__UpperCamelCase = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
__UpperCamelCase = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
__UpperCamelCase = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
__UpperCamelCase = name.replace('pretrained' , 'dpt' )
if "bn" in name:
__UpperCamelCase = name.replace('bn' , 'batch_norm' )
if "head" in name:
__UpperCamelCase = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
__UpperCamelCase = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
__UpperCamelCase = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
__UpperCamelCase = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
__UpperCamelCase = name.replace('..' , '.' )
if "stem.conv" in name:
__UpperCamelCase = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
__UpperCamelCase = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
__UpperCamelCase = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
__UpperCamelCase = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
__UpperCamelCase = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
__UpperCamelCase = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
__UpperCamelCase = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
def read_in_q_k_v(state_dict, config):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.weight''')
        in_proj_bias = state_dict.pop(f'''dpt.encoder.layer.{i}.attn.qkv.bias''')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''dpt.encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location='cpu')
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if 'ade' in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if 'ade' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors='pt')
    # forward pass
    outputs = model(**encoding).logits if 'ade' in checkpoint_url else model(**encoding).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode='bicubic', align_corners=False, )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f'''Saving model to {pytorch_dump_folder_path}''')
        model.save_pretrained(pytorch_dump_folder_path)
        print(f'''Saving image processor to {pytorch_dump_folder_path}''')
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub('ybelkada/dpt-hybrid-midas')
        image_processor.push_to_hub('ybelkada/dpt-hybrid-midas')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
parser.add_argument(
'''--show_prediction''',
action='''store_true''',
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
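
# Example invocation; the script filename and paths are placeholders, the flags mirror the
# argparse definitions above:
#
#     python convert_dpt_hybrid_to_pytorch.py \
#         --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#         --pytorch_dump_folder_path ./dpt-dump \
#         --show_prediction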
| 399
|
'''simple docstring'''
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def mock_emitted_deprecation_warnings(monkeypatch):
    monkeypatch.setattr('datasets.utils.deprecation_utils._emitted_deprecation_warnings', set())


@pytest.fixture
def mock_hfh(monkeypatch):
    class MetricMock:
        def __init__(self, metric_id):
            self.id = metric_id

    class HfhMock:
        _metrics = [MetricMock(metric_id) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]

        def list_metrics(self):
            return self._metrics

    monkeypatch.setattr('datasets.inspect.huggingface_hub', HfhMock())


@pytest.mark.parametrize(
    'func, args', [(load_metric, ('metrics/mse',)), (list_metrics, ()), (inspect_metric, ('metrics/mse', 'tmp_path'))])
def test_metric_deprecation_warning(func, args, mock_emitted_deprecation_warnings, mock_hfh, tmp_path):
    if "tmp_path" in args:
        args = tuple(arg if arg != 'tmp_path' else tmp_path for arg in args)
    with pytest.warns(FutureWarning, match='https://huggingface.co/docs/evaluate'):
        func(*args)
| 399
| 1
|
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def rename_keys(s_dict):
    """Rename the T5X checkpoint keys to the HF SwitchTransformers layout."""
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r""".*/layers_(\d+)"""
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"""layers_(\d+)""", r"""block/\1/layer""", new_key)

        layer_to_block_of_layer = r"""(encoder|decoder)\/"""
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"""/mlp/""", r"""/1/mlp/""", new_key)
                new_key = re.sub(r"""/pre_mlp_layer_norm/""", r"""/1/layer_norm/""", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"""/mlp/""", r"""/2/mlp/""", new_key)
                new_key = re.sub(r"""/pre_mlp_layer_norm/""", r"""/2/layer_norm/""", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f'{key} -> {new_key}')
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)
    return s_dict
GIN_TO_CONFIG_MAPPING = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def convert_gin_to_config(gin_file, num_experts):
    """Build a SwitchTransformersConfig from the values found in a gin config file."""
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"""(.*) = ([0-9.]*)""", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"""(.*activations) = \(\'(.*)\',\)""", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])
    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8):
    print(f'Loading flax weights from : {flax_checkpoint_path}')
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)
    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)
    pt_model = SwitchTransformersForConditionalGeneration(config)
    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)
    print(f'Save PyTorch model to {pytorch_dump_path}')
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
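
# Example invocation; the script filename and paths are placeholders, the flags mirror the
# argparse definitions above:
#
#     python convert_switch_transformers_flax_to_pytorch.py \
#         --switch_t5x_checkpoint_path /path/to/t5x_checkpoint \
#         --gin_file /path/to/config.gin \
#         --pytorch_dump_folder_path ./switch-dump \
#         --num_experts 8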
| 711
|
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)

LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
            search or log softmax for each vocabulary token when using beam search
        kwargs (`Dict[str, Any]`, *optional*):
            Additional logits processor specific kwargs.

    Return:
        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.

"""
class FlaxLogitsProcessor:
    """Abstract base class for all logit processors that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.'
        )
class FlaxLogitsWarper:
    """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        raise NotImplementedError(
            f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.'
        )
class FlaxLogitsProcessorList(list):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        f'Make sure that all the required parameters: {list(function_args.keys())} for '
                        f'{processor.__class__} are passed to the logits processor.'
                    )
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
    def __init__(self, temperature):
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(f'`temperature` has to be a strictly positive float, but is {temperature}')
        self.temperature = temperature

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        scores = scores / self.temperature
        return scores
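
# A hedged illustration of the division above with plain jax.numpy; the logits are arbitrary:
#
#     logits = jnp.array([[2.0, 1.0, 0.0]])
#     jax.nn.softmax(logits / 0.5, axis=-1)   # low temperature  -> sharper distribution
#     jax.nn.softmax(logits / 2.0, axis=-1)   # high temperature -> flatter distribution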
class FlaxTopPLogitsWarper(FlaxLogitsWarper):
    def __init__(self, top_p, filter_value=-float("""Inf"""), min_tokens_to_keep=1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(f'`top_p` has to be a float > 0 and < 1, but is {top_p}')
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(f'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}')
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__(self, input_ids, scores, cur_len) -> jnp.ndarray:
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)
        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
        return next_scores
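
# A hedged walkthrough of the masking above on one toy row: sorted probs [0.5, 0.3, 0.2]
# give cumulative sums [0.5, 0.8, 1.0]. With top_p=0.6, `cumulative_probs < top_p` keeps
# only index 0; the roll-by-one plus the `[:, 0]` set then also keeps index 1 (the token
# that crosses the threshold), and index 2 is replaced by filter_value.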
class FlaxTopKLogitsWarper(FlaxLogitsWarper):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ = -float("""Inf""" ) ,lowerCamelCase_ = 1 ) -> List[Any]:
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or top_k <= 0:
raise ValueError(f'`top_k` has to be a strictly positive integer, but is {top_k}' )
A = max(lowerCamelCase_ ,lowerCamelCase_ )
A = filter_value
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> jnp.ndarray:
A , A = scores.shape
A = jnp.full(batch_size * vocab_size ,self.filter_value )
A = min(self.top_k ,scores.shape[-1] ) # Safety check
A , A = lax.top_k(lowerCamelCase_ ,lowerCamelCase_ )
A = jnp.broadcast_to((jnp.arange(lowerCamelCase_ ) * vocab_size)[:, None] ,(batch_size, topk) ).flatten()
A = topk_scores.flatten()
A = topk_indices.flatten() + shift
A = next_scores_flat.at[topk_indices_flat].set(lowerCamelCase_ )
A = next_scores_flat.reshape(lowerCamelCase_ ,lowerCamelCase_ )
return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ) -> List[Any]:
A = bos_token_id
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> jnp.ndarray:
A = jnp.full(scores.shape ,-float("""inf""" ) )
A = 1 - jnp.bool_(cur_len - 1 )
A = jnp.where(lowerCamelCase_ ,new_scores.at[:, self.bos_token_id].set(0 ) ,lowerCamelCase_ )
return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ ) -> Union[str, Any]:
A = max_length
A = eos_token_id
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> jnp.ndarray:
A = jnp.full(scores.shape ,-float("""inf""" ) )
A = 1 - jnp.bool_(cur_len - self.max_length + 1 )
A = jnp.where(lowerCamelCase_ ,new_scores.at[:, self.eos_token_id].set(0 ) ,lowerCamelCase_ )
return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
'''simple docstring'''
def __init__( self ,lowerCamelCase_ ,lowerCamelCase_ ) -> List[str]:
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or min_length < 0:
raise ValueError(f'`min_length` has to be a positive integer, but is {min_length}' )
if not isinstance(lowerCamelCase_ ,lowerCamelCase_ ) or eos_token_id < 0:
raise ValueError(f'`eos_token_id` has to be a positive integer, but is {eos_token_id}' )
A = min_length
A = eos_token_id
def __call__( self ,lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ ) -> jnp.ndarray:
# create boolean flag to decide if min length penalty should be applied
A = 1 - jnp.clip(cur_len - self.min_length ,0 ,1 )
A = jnp.where(lowerCamelCase_ ,scores.at[:, self.eos_token_id].set(-float("""inf""" ) ) ,lowerCamelCase_ )
return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
    r"""Logits processor suppressing a list of tokens as soon as generation starts (at `begin_index`)."""

    def __init__(self, begin_suppress_tokens, begin_index: int):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)

        return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
    r"""Logits processor suppressing a list of tokens at every generation step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
    r"""Logits processor that forces the token given by `force_token_map` at selected generation indices."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]

            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            # If cur_len is at or past the end of force_token_array, the processor does nothing.
            lambda: scores,
            # Otherwise, it may force a certain token.
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                # Only valid (non-negative) tokens are forced.
                lambda: _force_token(cur_len),
                # Otherwise, the processor does nothing.
                lambda: scores,
            ),
        )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
    r"""Whisper-specific logits processor that keeps the sampled timestamp tokens well-formed."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1

        self.begin_index = decoder_input_length + 1

        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )

            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )

            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),  # must be a non-timestamp token
                    scores_k.at[: self.eos_token_id].set(-float("inf")),  # cannot be a normal text token
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )

        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index

        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)

        return scores
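

# A minimal usage sketch (illustrative values, not part of the upstream module);
# it assumes `jax.numpy` is imported as `jnp` at module level, as the classes
# above already require:
#
#   demo_scores = jnp.log(jnp.array([[0.5, 0.3, 0.1, 0.1]]))
#   warper = FlaxTopKLogitsWarper(top_k=2)
#   warped = warper(None, demo_scores, cur_len=1)  # entries outside the top-2 become `filter_value`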
"""simple docstring"""
def snake_case ( lowerCAmelCase_ = 10**12 ) -> int:
_snake_case = 1
_snake_case = 0
_snake_case = 1
_snake_case = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(F"{solution() = }")
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4096,
    "google/bigbird-roberta-large": 4096,
    "google/bigbird-base-trivia-itc": 4096,
}

SPIECE_UNDERLINE = "▁"


class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" BigBird tokenizer, backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
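

# A minimal usage sketch (requires `transformers` installed and network access
# to the Hugging Face Hub; this module uses relative imports, so it is not
# meant to be run directly):
#
#   from transformers import BigBirdTokenizerFast
#   tokenizer = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#   print(tokenizer("Hello world").input_ids)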
"""simple docstring"""
import pprint
import requests
lowercase__ : Optional[Any] = '''https://zenquotes.io/api'''
def __lowercase ( ):
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def __lowercase ( ):
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
lowercase__ : List[Any] = random_quotes()
pprint.pprint(response)
"""simple docstring"""
from statistics import mean, stdev
def __lowercase ( _a , _a = 3 ):
snake_case_ : Optional[int] = min(_a )
snake_case_ : str = max(_a )
# normalize data
return [round((x - x_min) / (x_max - x_min) , _a ) for x in data]
def __lowercase ( _a , _a = 3 ):
snake_case_ : Any = mean(_a )
snake_case_ : str = stdev(_a )
# standardize data
return [round((x - mu) / (sigma) , _a ) for x in data]
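

# A short, self-contained demo of the two helpers above (sample values are
# illustrative):
if __name__ == "__main__":
    sample = [2.0, 4.0, 6.0, 8.0]
    print(normalization(sample))  # [0.0, 0.333, 0.667, 1.0]
    print(standardization(sample))  # approximately [-1.162, -0.387, 0.387, 1.162]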
"""Implements `transformers-cli env`, which prints environment information for bug reports."""
import importlib.util
import os
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import (
    is_accelerate_available,
    is_flax_available,
    is_safetensors_available,
    is_tf_available,
    is_torch_available,
)
from . import BaseTransformersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
"""Maximum sum of k consecutive elements, via a sliding window."""
from __future__ import annotations


def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum over all windows of `k` consecutive elements of `array`."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    # Start from the sum of the first window, then slide one step at a time.
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod
    from random import randint

    testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
    print(f"The maximum sum of {k} consecutive elements is {max_sum_in_array(array, k)}")
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import doctest
import sys
import warnings
from os.path import abspath, dirname, join

import _pytest

from transformers.testing_utils import HfDoctestModule, HfDocTestParser


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5; convert it to 0 so CI does not fail.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
"""Tests for the SAM processor (PyTorch, TensorFlow, and cross-framework equivalence)."""
import shutil
import tempfile
import unittest

import numpy as np

from transformers.testing_utils import (
    is_pt_tf_cross_test,
    require_tf,
    require_torch,
    require_torchvision,
    require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, SamImageProcessor, SamProcessor

if is_torch_available():
    import torch

if is_tf_available():
    import tensorflow as tf


@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [torch.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))


@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = [tf.ones((1, 3, 5, 5))]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )


@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Creates a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )

        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
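

# A minimal usage sketch for the processor exercised above (requires network
# access to the Hugging Face Hub; the checkpoint name and point prompt are
# illustrative):
#
#   processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#   inputs = processor(images=image, input_points=[[[450, 600]]], return_tensors="pt")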