| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
import os
import socket
from contextlib import contextmanager

import torch

from ..commands.config.default import write_basic_config  # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version


if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm


def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers (DDP, DeepSpeed, compiled wrappers)."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached it first."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk; use in place of `torch.save()` so only the main process writes."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """Add each keyword argument to `os.environ` (upper-cased) and remove it again on exit."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Get a human-readable name for an object, falling back to its class name or `str()`."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination` and return `destination`."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Check whether a port is already in use on localhost (default: 29500)."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
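# A quick usage sketch for two of the helpers above (a demo assuming the
# deobfuscated names are used as-is; the environment values are illustrative):
if __name__ == "__main__":
    with patch_environment(master_addr="127.0.0.1", master_port="29501"):
        assert os.environ["MASTER_ADDR"] == "127.0.0.1"
    # on exit the variables are removed again (assuming they were unset beforehand)
    assert "MASTER_ADDR" not in os.environ

    assert merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}}) == {"a": {"b": 1, "c": 2}}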
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak a timm hybrid ViT checkpoint into the HuggingFace ViT hybrid structure.
    """

    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
    logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_r50_s16_384',
type=str,
help='Name of the hybrid ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
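# Usage sketch for the script above; the file name and output path are illustrative,
# not verified against the repository layout:
#
#   python convert_vit_hybrid_timm_to_pytorch.py \
#       --vit_name vit_base_r50_s16_384 \
#       --pytorch_dump_folder_path ./vit-hybrid-base-bit-384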
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict (e.g. pytorch_model.bin) to fp16, in place or at `save_path`."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
if __name__ == "__main__":
fire.Fire(convert)
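# Usage sketch: `fire.Fire(convert)` exposes `convert` as a CLI, so the script can
# be invoked as (file name illustrative):
#
#   python convert_to_fp16.py pytorch_model.bin --save_path pytorch_model_fp16.bin
#
# or called directly from Python:
#
#   convert("pytorch_model.bin", save_path="pytorch_model_fp16.bin")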
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """
    Zero-shot image classification pipeline: predicts the most likely label for an image
    out of a set of candidate labels supplied at call time.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        """Assign labels to the image(s) passed as inputs."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
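# A minimal usage sketch for the pipeline above; the checkpoint name is illustrative
# (any CLIP-style zero-shot image classification checkpoint should work):
if __name__ == "__main__":
    from transformers import pipeline

    classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
    predictions = classifier(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "dog", "remote control"],
    )
    print(predictions)  # list of {"score": ..., "label": ...} dicts, sorted by score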
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
    # 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = µA(x) * µB(x)
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def get_week_day(year: int, month: int, day: int) -> str:
    """Return the week-day name for a given date using Conway's Doomsday algorithm."""
    # minimal input check:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = ((centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
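    # Worked examples for get_week_day (dates checked by hand against a calendar):
    assert get_week_day(2020, 10, 24) == "Saturday"
    assert get_week_day(2021, 1, 1) == "Friday"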
"""simple docstring"""
class SCREAMING_SNAKE_CASE__ :
def __init__(self , _lowercase ):
'''simple docstring'''
__a : Dict = len(_lowercase )
__a : Tuple = [0] * len_array
if len_array > 0:
__a : Tuple = array[0]
for i in range(1 , _lowercase ):
__a : Optional[Any] = self.prefix_sum[i - 1] + array[i]
def lowerCAmelCase__(self , _lowercase , _lowercase ):
'''simple docstring'''
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def lowerCAmelCase__(self , _lowercase ):
'''simple docstring'''
__a : Dict = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(_lowercase )
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
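    # Quick usage sketch for PrefixSum:
    ps = PrefixSum([1, 2, 3, 4])
    assert ps.get_sum(0, 3) == 10  # sum of the whole array
    assert ps.get_sum(1, 2) == 5   # 2 + 3
    assert ps.contains_sum(7)      # the subarray [3, 4]
    assert not ps.contains_sum(100)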
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Find `target` in array[left:right] by linear scan; return its index or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search on a sorted array; return the index of `target` or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search on a sorted array; return the index of `target` or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result2 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
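    # Non-interactive example of both variants on toy data:
    data = [0, 2, 4, 8, 16, 32, 64]
    assert ite_ternary_search(data, 8) == 3
    assert rec_ternary_search(0, len(data) - 1, data, 8) == 3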
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_owlvit''': [
'''OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''OwlViTConfig''',
'''OwlViTOnnxConfig''',
'''OwlViTTextConfig''',
'''OwlViTVisionConfig''',
],
'''processing_owlvit''': ['''OwlViTProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_owlvit"] = ["OwlViTFeatureExtractor"]
    _import_structure["image_processing_owlvit"] = ["OwlViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_owlvit"] = [
'''OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OwlViTModel''',
'''OwlViTPreTrainedModel''',
'''OwlViTTextModel''',
'''OwlViTVisionModel''',
'''OwlViTForObjectDetection''',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)


DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets into one, alternating between sources or sampling them with `probabilities`."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets along rows (axis=0) or columns (axis=1)."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
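# A short usage sketch for the two functions above (toy datasets, illustrative):
if __name__ == "__main__":
    from datasets import Dataset

    d1 = Dataset.from_dict({"a": [0, 1, 2]})
    d2 = Dataset.from_dict({"a": [10, 11, 12]})

    print(concatenate_datasets([d1, d2])["a"])  # [0, 1, 2, 10, 11, 12]
    # with no probabilities given, rows are expected to alternate: [0, 10, 1, 11, 2, 12]
    print(interleave_datasets([d1, d2])["a"])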
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """An edge of a weighted graph; weight must be 0 or 1."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self):
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int):
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        """0-1 BFS: zero-weight edges go to the front of the deque, unit-weight edges to the back."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
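    # Small usage sketch: the direct 0 -> 3 edge costs 1, but the detour through
    # vertices 1 and 2 uses only zero-weight edges, so the shortest path is 0.
    g = AdjacencyList(4)
    g.add_edge(0, 3, 1)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 2, 0)
    g.add_edge(2, 3, 0)
    assert g.get_shortest_path(0, 3) == 0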
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
def jaro_winkler(str1: str, str2: str) -> float:
    """Compute the Jaro-Winkler similarity of two strings (1.0 means identical)."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"

        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
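# Worked example (follows from the implementation above): for "martha" vs
# "marhta" there are 6 matching characters and 1 transposition, so
# jaro = (6/6 + 6/6 + 5/6) / 3 ≈ 0.9444, and with a common prefix of length 3
# jaro_winkler = 0.9444 + 0.1 * 3 * (1 - 0.9444) ≈ 0.9611.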
def climb_stairs(number_of_steps: int) -> int:
    """
    Distinct ways to climb ``number_of_steps`` stairs taking 1 or 2 steps at a time.

    >>> climb_stairs(1)
    1
    >>> climb_stairs(3)
    3
    """
    assert (
        isinstance(number_of_steps, int) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous, current = 1, 1
    for _ in range(number_of_steps - 1):
        current, previous = current + previous, current
    return current


if __name__ == "__main__":
    import doctest

    doctest.testmod()
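# Quick sanity check (illustrative, mirrors the Fibonacci recurrence above):
#   [climb_stairs(n) for n in range(1, 6)] == [1, 2, 3, 5, 8]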
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
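# Minimal usage sketch (illustrative): thanks to ``attribute_map``, generic code
# can read the decoder hyperparameters under the common config names.
#   config = TrOCRConfig(d_model=512, decoder_layers=6)
#   assert config.hidden_size == 512 and config.num_hidden_layers == 6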
'''simple docstring'''
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
"Assert",
"AssignVariableOp",
"EmptyTensorList",
"MergeV2Checkpoints",
"ReadVariableOp",
"ResourceGather",
"RestoreV2",
"SaveV2",
"ShardedFilename",
"StatefulPartitionedCall",
"StaticRegexFullMatch",
"VarHandleOp",
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
parser.add_argument(
"--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
)
parser.add_argument(
"--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
)
parser.add_argument(
"--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
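# Example invocation (paths are illustrative), run from the repo root:
#   python utils/check_tf_ops.py --saved_model_path path/to/saved_model.pb --opset 12 --strict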
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
T = TypeVar("T")
class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        # the tree is stored in an implicit array of size 2 * N; leaves hold arr
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        # internal node p combines its two children
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        # overwrite the leaf, then recompute every ancestor up to the root
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        # iterative bottom-up range query over the inclusive interval [l, r]
        l, r = l + self.N, r + self.N

        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2

        return res
if __name__ == "__main__":
from functools import reduce
    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
    test_updates = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)
    def test_all_segments() -> None:
        """Test every possible segment [i, j] against functools.reduce."""
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)
test_all_segments()
for index, value in test_updates.items():
        test_array[index] = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
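# Complexity note (follows from the iterative scheme above): build() is O(n),
# while update() and query() each walk one root-to-leaf path, i.e. O(log n).
# For example, min_segment_tree.query(0, len(test_array) - 1) returns the
# global minimum in logarithmic time.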
import argparse
import collections
import json
import os
import re
import string
import sys
import numpy as np
ARTICLES_REGEX = re.compile(r"\b(a|an|the)\b", re.UNICODE)

OPTS = None


def parse_args():
    parser = argparse.ArgumentParser("Official evaluation script for SQuAD version 2.0.")
    parser.add_argument("data_file", metavar="data.json", help="Input data JSON file.")
    parser.add_argument("pred_file", metavar="pred.json", help="Model predictions.")
    parser.add_argument(
        "--out-file", "-o", metavar="eval.json", help="Write accuracy metrics to file (default is stdout)."
    )
    parser.add_argument(
        "--na-prob-file", "-n", metavar="na_prob.json", help="Model estimates of probability of no answer."
    )
    parser.add_argument(
        "--na-prob-thresh",
        "-t",
        type=float,
        default=1.0,
        help='Predict "" if no-answer probability exceeds this (default = 1.0).',
    )
    parser.add_argument(
        "--out-image-dir", "-p", metavar="out_images", default=None, help="Save precision-recall curves to directory."
    )
    parser.add_argument("--verbose", "-v", action="store_true")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()


def make_qid_to_has_ans(dataset):
    qid_to_has_ans = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid_to_has_ans[qa["id"]] = bool(qa["answers"]["text"])
    return qid_to_has_ans


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return ARTICLES_REGEX.sub(" ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def get_tokens(s):
    if not s:
        return []
    return normalize_answer(s).split()


def compute_exact(a_gold, a_pred):
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_f1(a_gold, a_pred):
    gold_toks = get_tokens(a_gold)
    pred_toks = get_tokens(a_pred)
    common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
    num_same = sum(common.values())
    if len(gold_toks) == 0 or len(pred_toks) == 0:
        # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
        return int(gold_toks == pred_toks)
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_toks)
    recall = 1.0 * num_same / len(gold_toks)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def get_raw_scores(dataset, preds):
    exact_scores = {}
    f1_scores = {}
    for article in dataset:
        for p in article["paragraphs"]:
            for qa in p["qas"]:
                qid = qa["id"]
                gold_answers = [t for t in qa["answers"]["text"] if normalize_answer(t)]
                if not gold_answers:
                    # For unanswerable questions, only correct answer is empty string
                    gold_answers = [""]
                if qid not in preds:
                    print(f"Missing prediction for {qid}")
                    continue
                a_pred = preds[qid]
                # Take max over all gold answers
                exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
                f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
    return exact_scores, f1_scores


def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
    new_scores = {}
    for qid, s in scores.items():
        pred_na = na_probs[qid] > na_prob_thresh
        if pred_na:
            new_scores[qid] = float(not qid_to_has_ans[qid])
        else:
            new_scores[qid] = s
    return new_scores


def make_eval_dict(exact_scores, f1_scores, qid_list=None):
    if not qid_list:
        total = len(exact_scores)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores.values()) / total),
                ("f1", 100.0 * sum(f1_scores.values()) / total),
                ("total", total),
            ]
        )
    else:
        total = len(qid_list)
        return collections.OrderedDict(
            [
                ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
                ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
                ("total", total),
            ]
        )


def merge_eval(main_eval, new_eval, prefix):
    for k in new_eval:
        main_eval[f"{prefix}_{k}"] = new_eval[k]


def plot_pr_curve(precisions, recalls, out_image, title):
    plt.step(recalls, precisions, color="b", alpha=0.2, where="post")
    plt.fill_between(recalls, precisions, step="post", alpha=0.2, color="b")
    plt.xlabel("Recall")
    plt.ylabel("Precision")
    plt.xlim([0.0, 1.05])
    plt.ylim([0.0, 1.05])
    plt.title(title)
    plt.savefig(out_image)
    plt.clf()


def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans, out_image=None, title=None):
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    true_pos = 0.0
    cur_p = 1.0
    cur_r = 0.0
    precisions = [1.0]
    recalls = [0.0]
    avg_prec = 0.0
    for i, qid in enumerate(qid_list):
        if qid_to_has_ans[qid]:
            true_pos += scores[qid]
        cur_p = true_pos / float(i + 1)
        cur_r = true_pos / float(num_true_pos)
        if i == len(qid_list) - 1 or na_probs[qid] != na_probs[qid_list[i + 1]]:
            # i.e., if we can put a threshold after this point
            avg_prec += cur_p * (cur_r - recalls[-1])
            precisions.append(cur_p)
            recalls.append(cur_r)
    if out_image:
        plot_pr_curve(precisions, recalls, out_image, title)
    return {"ap": 100.0 * avg_prec}


def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    if out_image_dir and not os.path.exists(out_image_dir):
        os.makedirs(out_image_dir)
    num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
    if num_true_pos == 0:
        return
    pr_exact = make_precision_recall_eval(
        exact_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_exact.png"),
        title="Precision-Recall curve for Exact Match score",
    )
    pr_f1 = make_precision_recall_eval(
        f1_raw,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_f1.png"),
        title="Precision-Recall curve for F1 score",
    )
    oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(
        oracle_scores,
        na_probs,
        num_true_pos,
        qid_to_has_ans,
        out_image=os.path.join(out_image_dir, "pr_oracle.png"),
        title="Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)",
    )
    merge_eval(main_eval, pr_exact, "pr_exact")
    merge_eval(main_eval, pr_f1, "pr_f1")
    merge_eval(main_eval, pr_oracle, "pr_oracle")


def histogram_na_prob(na_probs, qid_list, image_dir, name):
    if not qid_list:
        return
    x = [na_probs[k] for k in qid_list]
    weights = np.ones_like(x) / float(len(x))
    plt.hist(x, weights=weights, bins=20, range=(0.0, 1.0))
    plt.xlabel("Model probability of no-answer")
    plt.ylabel("Proportion of dataset")
    plt.title(f"Histogram of no-answer probability: {name}")
    plt.savefig(os.path.join(image_dir, f"na_prob_hist_{name}.png"))
    plt.clf()


def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
    num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
    cur_score = num_no_ans
    best_score = cur_score
    best_thresh = 0.0
    qid_list = sorted(na_probs, key=lambda k: na_probs[k])
    for i, qid in enumerate(qid_list):
        if qid not in scores:
            continue
        if qid_to_has_ans[qid]:
            diff = scores[qid]
        else:
            if preds[qid]:
                diff = -1
            else:
                diff = 0
        cur_score += diff
        if cur_score > best_score:
            best_score = cur_score
            best_thresh = na_probs[qid]
    return 100.0 * best_score / len(scores), best_thresh


def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
    best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
    best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
    main_eval["best_exact"] = best_exact
    main_eval["best_exact_thresh"] = exact_thresh
    main_eval["best_f1"] = best_f1
    main_eval["best_f1_thresh"] = f1_thresh


def main():
    with open(OPTS.data_file) as f:
        dataset_json = json.load(f)
        dataset = dataset_json["data"]
    with open(OPTS.pred_file) as f:
        preds = json.load(f)
    if OPTS.na_prob_file:
        with open(OPTS.na_prob_file) as f:
            na_probs = json.load(f)
    else:
        na_probs = {k: 0.0 for k in preds}
    qid_to_has_ans = make_qid_to_has_ans(dataset)  # maps qid to True/False
    has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
    no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
    exact_raw, f1_raw = get_raw_scores(dataset, preds)
    exact_thresh = apply_no_ans_threshold(exact_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    f1_thresh = apply_no_ans_threshold(f1_raw, na_probs, qid_to_has_ans, OPTS.na_prob_thresh)
    out_eval = make_eval_dict(exact_thresh, f1_thresh)
    if has_ans_qids:
        has_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=has_ans_qids)
        merge_eval(out_eval, has_ans_eval, "HasAns")
    if no_ans_qids:
        no_ans_eval = make_eval_dict(exact_thresh, f1_thresh, qid_list=no_ans_qids)
        merge_eval(out_eval, no_ans_eval, "NoAns")
    if OPTS.na_prob_file:
        find_all_best_thresh(out_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans)
    if OPTS.na_prob_file and OPTS.out_image_dir:
        run_precision_recall_analysis(out_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, OPTS.out_image_dir)
        histogram_na_prob(na_probs, has_ans_qids, OPTS.out_image_dir, "hasAns")
        histogram_na_prob(na_probs, no_ans_qids, OPTS.out_image_dir, "noAns")
    if OPTS.out_file:
        with open(OPTS.out_file, "w") as f:
            json.dump(out_eval, f)
    else:
        print(json.dumps(out_eval, indent=2))


if __name__ == "__main__":
    OPTS = parse_args()
    if OPTS.out_image_dir:
        import matplotlib

        matplotlib.use("Agg")
        import matplotlib.pyplot as plt
    main()
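# Illustrative output (values are made up): the script prints a JSON object like
#   {"exact": 64.8, "f1": 67.6, "total": 11873, "HasAns_exact": ..., "NoAns_f1": ...}
# plus best_* threshold entries when --na-prob-file is given.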
"""simple docstring"""
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times ``term`` appears in ``document``."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing ``term``, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return log10(n / df), optionally with add-one smoothing of df."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Combine a term frequency and an inverse document frequency."""
    return round(tf * idf, 3)
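# Worked example (values follow directly from the functions above):
#   term_frequency("ai", "AI is fun. AI is everywhere.")           -> 2
#   inverse_document_frequency(1, 10) == round(log10(10 / 1), 3)   -> 1.0
#   tf_idf(2, 1.0)                                                 -> 2.0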
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
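# Minimal sketch (illustrative): the defaults above reproduce the released
# xlm-roberta-xl architecture sizes.
#   config = XLMRobertaXLConfig()
#   assert config.hidden_size == 2560 and config.num_hidden_layers == 36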
'''simple docstring'''
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_nezha"] = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_xmod": [
"XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XmodConfig",
"XmodOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
"XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
"XmodForCausalLM",
"XmodForMaskedLM",
"XmodForMultipleChoice",
"XmodForQuestionAnswering",
"XmodForSequenceClassification",
"XmodForTokenClassification",
"XmodModel",
"XmodPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2


class Dictionary:
    """A mapping from symbols to consecutive integers (fairseq-style)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        """Return the number of symbols in the dictionary."""
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices

    @classmethod
    def load(cls, f):
        """Load the dictionary from a ``<symbol> <count>`` text file."""
        d = cls()
        d.add_from_file(f)
        return d

    def add_symbol(self, word, n=1, overwrite=False):
        """Add a word to the dictionary and return its index."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx

    def _load_meta(self, lines):
        return 0
    def add_from_file(self, f):
        """Load a pre-existing dictionary from a text file and add its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word)
                    )
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) strip the fairseq word-continuation marker "@@", (2) append the word-end
    # marker "</w>" to tokens that were not broken up
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
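# Quick check of the rewrite (hypothetical tokens):
#   rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 4, "er": 5})
#   -> {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le": 4, "er</w>": 5}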
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    model_conf = {
        "activation_dropout": args["activation_dropout"],
        "architectures": ["BioGptForCausalLM"],
        "attention_probs_dropout_prob": args["attention_dropout"],
        "bos_token_id": 0,
        "eos_token_id": 2,
        "hidden_act": args["activation_fn"],
        "hidden_dropout_prob": args["dropout"],
        "hidden_size": args["decoder_embed_dim"],
        "initializer_range": 0.02,
        "intermediate_size": args["decoder_ffn_embed_dim"],
        "layer_norm_eps": 1e-12,
        "layerdrop": args["decoder_layerdrop"],
        "max_position_embeddings": args["max_target_positions"],
        "model_type": "biogpt",
        "num_attention_heads": args["decoder_attention_heads"],
        "num_hidden_layers": args["decoder_layers"],
        "pad_token_id": 1,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_decoder_input_output_embed"],
        "vocab_size": src_vocab_size,
    }

    # good hparam defaults to start with
    print(f"Generating {biogpt_model_config_file}")
    with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "bos_token": "<s>",
        "eos_token": "</s>",
        "model_max_length": 1024,
        "pad_token": "<pad>",
        "special_tokens_map_file": None,
        "tokenizer_class": "BioGptTokenizer",
        "unk_token": "<unk>",
    }

    print(f"Generating {biogpt_tokenizer_config_file}")
    with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model_state_dict = chkpt["model"]

    # remove unneeded keys
    ignore_keys = [
        "decoder.version",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    layer_names = list(model_state_dict.keys())
    for layer_name in layer_names:
        if layer_name.endswith("output_projection.weight"):
            model_state_dict["output_projection.weight"] = model_state_dict.pop(layer_name)
        else:
            model_state_dict["biogpt." + layer_name] = model_state_dict.pop(layer_name)

    config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = BioGptForCausalLM(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--biogpt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """Export a PyTorch BertModel as a TensorFlow 1.x checkpoint."""
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
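# Example invocation (file name and paths are illustrative):
#   python convert_bert_pytorch_checkpoint_to_original_tf.py \
#       --model_name bert-base-uncased \
#       --pytorch_model_path ./pytorch_model.bin \
#       --tf_cache_dir ./tf_ckpt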
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args


def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer


def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")


def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
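# Design note: every element gets its own process; in round i, even- or
# odd-indexed neighbor pairs swap through their shared Pipe, so an n-element
# list is fully sorted after at most n rounds (10 here for the 10-element demo).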
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101_122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101_122)
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )

        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model. So we also use French texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:

    class Image:
        """Dummy Image stub so the module can be imported without vision dependencies."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass
    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
] , )
        outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] , threshold=0.0 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
],
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
],
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
        outputs = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection(self ):
        model_id = 'facebook/detr-resnet-50'
        object_detector = pipeline('object-detection' , model=model_id )
        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
        batch_outputs = object_detector(
            [
                'http://images.cocodataset.org/val2017/000000039769.jpg',
                'http://images.cocodataset.org/val2017/000000039769.jpg',
            ] )
        self.assertEqual(
            nested_simplify(batch_outputs , decimals=4 ) , [
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
] , )
@require_torch
@slow
    def test_threshold(self ):
        threshold = 0.9985
        model_id = 'facebook/detr-resnet-50'
        object_detector = pipeline('object-detection' , model=model_id )
        outputs = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=threshold )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self ):
        model_id = 'Narsil/layoutlmv3-finetuned-funsd'
        threshold = 0.9993
        object_detector = pipeline('object-detection' , model=model_id , threshold=threshold )
        outputs = object_detector(
            'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}},
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}},
] , )
| 652
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mega'''] = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 652
| 1
|
"""simple docstring"""
import numpy as np
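# Computes the hyperbolic tangent element-wise via the logistic identity tanh(x) = 2 / (1 + e^(-2x)) - 1.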
def _snake_case ( vector : np.array ):
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 160
|
"""simple docstring"""
def or_gate( input_a : int , input_b : int ):
    return int((input_a, input_b).count(1 ) != 0 )
def test_or_gate( ):
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 134
| 0
|
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
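# CNN/DailyMail-style summary fixtures for the ROUGE tests below. The names PRED (model
# predictions) and TGT (reference targets) are assumed here; the original identifiers were garbled.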
PRED = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
TGT = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def test_disaggregated_scores_are_deterministic( ):
    """simple docstring"""
    no_aggregation = calculate_rouge(PRED , TGT , bootstrap_aggregation=False , rouge_keys=["rouge2", "rougeL"] )
    assert isinstance(no_aggregation , defaultdict )
    no_aggregation_just_r2 = calculate_rouge(PRED , TGT , bootstrap_aggregation=False , rouge_keys=["rouge2"] )
    assert (
        pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"] ).fmeasure.mean()
    )
def test_newline_cnn_improvement( ):
    """simple docstring"""
    k = "rougeLsum"
    score = calculate_rouge(PRED , TGT , newline_sep=True , rouge_keys=[k] )[k]
    score_no_sep = calculate_rouge(PRED , TGT , newline_sep=False , rouge_keys=[k] )[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics( ):
    """simple docstring"""
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED , TGT , newline_sep=True , rouge_keys=k )
    score_no_sep = calculate_rouge(PRED , TGT , newline_sep=False , rouge_keys=k )
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep( ):
    """simple docstring"""
    pred = [
"Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
]
    tgt = [
"Margot Frank, died in 1945, a month earlier than previously thought.",
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
" the final seconds on board Flight 9525.",
]
    assert calculate_rouge(pred , tgt , newline_sep=True ) == calculate_rouge(pred , tgt , newline_sep=False )
def test_pegasus_newline( ):
    """simple docstring"""
    pred = [
"\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
]
    tgt = [
" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
]
    prev_score = calculate_rouge(pred , tgt , rouge_keys=["rougeLsum"] , newline_sep=False )["rougeLsum"]
    new_score = calculate_rouge(pred , tgt , rouge_keys=["rougeLsum"] )["rougeLsum"]
assert new_score > prev_score
def test_rouge_cli( ):
    """simple docstring"""
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro" )
    metrics = calculate_rouge_path(data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) )
    assert isinstance(metrics , dict )
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) , bootstrap_aggregation=False )
    assert isinstance(metrics_default_dict , defaultdict )
| 459
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester:
    def __init__(self , parent , batch_size=3 , image_size=32 , num_channels=3 , embeddings_size=10 , hidden_sizes=[10, 20, 30, 40] , depths=[1, 1, 2, 1] , is_training=True , use_labels=True , hidden_act="relu" , num_labels=3 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes )
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self ):
        return RegNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
    def create_and_check_model(self , config , pixel_values , labels ):
        model = RegNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification(self , config , pixel_values , labels ):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class RegNetModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': RegNetModel, 'image-classification': RegNetForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self ):
        self.model_tester = RegNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RegNetConfig , has_text_modality=False )
    def test_config(self ):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self ):
        return
    @unittest.skip(reason="RegNet does not use inputs_embeds" )
    def test_inputs_embeds(self ):
        pass
    @unittest.skip(reason="RegNet does not support input and output embeddings" )
    def test_model_common_attributes(self ):
        pass
    def test_forward_signature(self ):
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_initialization(self ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config )
            for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
                    self.assertTrue(
                        torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
                    self.assertTrue(
                        torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
    def test_hidden_states_output(self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict , config , model_class )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification(self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained(self ):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase ):
    @cached_property
    def default_image_processor(self ):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self ):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
| 459
| 1
|
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch
    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv('''TORCH_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''torch'''))
    )
default_cache_path = os.path.join(torch_cache_home, '''transformers''')
CLOUDFRONT_DISTRIB_PREFIX = """https://cdn.huggingface.co"""
S3_BUCKET_PREFIX = """https://s3.amazonaws.com/models.huggingface.co/bert"""
PATH = """/""".join(str(Path(__file__).resolve()).split('''/''')[:-1])
CONFIG = os.path.join(PATH, '''config.yaml''')
ATTRIBUTES = os.path.join(PATH, '''attributes.txt''')
OBJECTS = os.path.join(PATH, '''objects.txt''')
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('''PYTORCH_PRETRAINED_BERT_CACHE''', default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv('''PYTORCH_TRANSFORMERS_CACHE''', PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv('''TRANSFORMERS_CACHE''', PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = """pytorch_model.bin"""
CONFIG_NAME = """config.yaml"""
def load_labels( objs=OBJECTS , attrs=ATTRIBUTES ):
    vg_classes = []
    with open(objs ) as f:
        for object in f.readlines():
            vg_classes.append(object.split(',' )[0].lower().strip() )
    vg_attrs = []
    with open(attrs ) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(',' )[0].lower().strip() )
    return vg_classes, vg_attrs
def load_checkpoint( ckp_path ):
    r = OrderedDict()
    with open(ckp_path , 'rb' ) as f:
        ckp = pkl.load(f )["model"]
    for k in copy.deepcopy(list(ckp.keys() ) ):
        v = ckp.pop(k )
        if isinstance(v , np.ndarray ):
            v = torch.tensor(v )
        else:
            assert isinstance(v , torch.Tensor ), type(v )
        r[k] = v
    return r
class Config:
    _pointer = {}
    def __init__( self, dictionary: dict, name: str = "root", level=0 ) -> Union[str, Any]:
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k )
            v = copy.deepcopy(v )
            if isinstance(v, dict ):
                v = Config(v, name=k, level=level + 1 )
            d[k] = v
            setattr(self, k, v )
        self._pointer = d
    def __repr__( self ) -> Any:
        return str(list((self._pointer.keys()) ) )
    def __setattr__( self, key, val ) -> Tuple:
        self.__dict__[key] = val
        self.__dict__[key.split('.' )[-1]] = val
        levels = key.split('.' )
        last_level = len(levels ) - 1
        pointer = self._pointer
        if len(levels ) > 1:
            for i, l in enumerate(levels ):
                if hasattr(self, l ) and isinstance(getattr(self, l ), Config ):
                    setattr(getattr(self, l ), '.'.join(levels[i:] ), val )
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
    def to_dict( self ) -> Optional[int]:
        return self._pointer
    def dump_yaml( self, data, file_name ) -> Tuple:
        with open(F"""{file_name}""", 'w' ) as stream:
            dump(data, stream )
    def dump_json( self, data, file_name ) -> Optional[Any]:
        with open(F"""{file_name}""", 'w' ) as stream:
            json.dump(data, stream )
    @staticmethod
    def load_yaml( config ) -> Dict:
        with open(config ) as stream:
            data = load(stream, Loader=Loader )
        return data
    def __str__( self ) -> Any:
        t = "    "
        if self._name != "root":
            r = F"""{t * (self._level-1)}{self._name}:\n"""
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items() ):
            if isinstance(v, Config ):
                r += F"""{t * (self._level)}{v}\n"""
                self._level += 1
            else:
                r += F"""{t * (self._level)}{k}: {v} ({type(v ).__name__})\n"""
            self._level = level
        return r[:-1]
    @classmethod
    def from_pretrained( cls, pretrained_model_name_or_path, **kwargs ) -> Any:
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs )
        return cls(config_dict )
    @classmethod
    def get_config_dict( cls, pretrained_model_name_or_path, **kwargs ) -> Optional[Any]:
        cache_dir = kwargs.pop('cache_dir', None )
        force_download = kwargs.pop('force_download', False )
        resume_download = kwargs.pop('resume_download', False )
        proxies = kwargs.pop('proxies', None )
        local_files_only = kwargs.pop('local_files_only', False )
        if os.path.isdir(pretrained_model_name_or_path ):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME )
        elif os.path.isfile(pretrained_model_name_or_path ) or is_remote_url(pretrained_model_name_or_path ):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False )
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file )
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg )
        if resolved_config_file == config_file:
            print('loading configuration file from path' )
        else:
            print('loading configuration file cache' )
        return Config.load_yaml(resolved_config_file ), kwargs
def compare( in_tensor ):
    out_tensor = torch.load('dump.pt' , map_location=in_tensor.device )
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape , n1[0, 0, :5] )
    print(n2.shape , n2[0, 0, :5] )
    assert np.allclose(n1 , n2 , rtol=0.01 , atol=0.1 ), (
        F"""{sum([1 for x in np.isclose(n1 , n2 , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(n1.flatten() )*100:.4f} %"""
        " element-wise mismatch"
    )
    raise Exception('tensors are all good' )
# Hugging face functions below
def is_remote_url( url_or_filename ):
    parsed = urlparse(url_or_filename )
    return parsed.scheme in ("http", "https")
def hf_bucket_url( model_id , filename , use_cdn=True ):
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return F"""{endpoint}/{model_id}-{filename}"""
    else:
        return F"""{endpoint}/{model_id}/{filename}"""
def http_get( url , temp_file , proxies=None , resume_size=0 , user_agent=None , ):
    ua = "python/{}".format(sys.version.split()[0] )
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__ )
    if isinstance(user_agent , dict ):
        ua += "; " + "; ".join('{}/{}'.format(k , v ) for k, v in user_agent.items() )
    elif isinstance(user_agent , str ):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url , stream=True , proxies=proxies , headers=headers )
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get('Content-Length' )
    total = resume_size + int(content_length ) if content_length is not None else None
    progress = tqdm(
        unit='B' , unit_scale=True , total=total , initial=resume_size , desc='Downloading' , )
    for chunk in response.iter_content(chunk_size=1024 ):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk ) )
            temp_file.write(chunk )
    progress.close()
def get_from_cache( url , cache_dir=None , force_download=False , proxies=None , etag_timeout=10 , resume_download=False , user_agent=None , local_files_only=False , ):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    os.makedirs(cache_dir , exist_ok=True )
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url , allow_redirects=True , proxies=proxies , timeout=etag_timeout )
            if response.status_code == 200:
                etag = response.headers.get('ETag' )
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url , etag )
    # get cache path to put the file
    cache_path = os.path.join(cache_dir , filename )
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path ):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir ) , filename + '.*' )
                if not file.endswith('.json' ) and not file.endswith('.lock' )
            ]
            if len(matching_files ) > 0:
                return os.path.join(cache_dir , matching_files[-1] )
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        'Cannot find the requested files in the cached path and outgoing traffic has been'
                        ' disabled. To enable model look-ups and downloads online, set \'local_files_only\''
                        ' to False.' )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path ) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path ):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path ) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"
            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path , 'a+b' ) as f:
                    yield f
            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path ):
                resume_size = os.stat(incomplete_path ).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile , dir=cache_dir , delete=False )
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                '%s not found in cache or force_download set to True, downloading to %s' , url , temp_file.name , )
            http_get(
                url , temp_file , proxies=proxies , resume_size=resume_size , user_agent=user_agent , )
        os.replace(temp_file.name , cache_path )
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path , 'w' ) as meta_file:
            json.dump(meta , meta_file )
    return cache_path
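# Build a deterministic cache filename by SHA-256 hashing the URL (plus the ETag, when one is known).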
def url_to_filename( url , etag=None ):
    url_bytes = url.encode('utf-8' )
    url_hash = sha256(url_bytes )
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode('utf-8' )
        etag_hash = sha256(etag_bytes )
        filename += "." + etag_hash.hexdigest()
    if url.endswith('.h5' ):
        filename += ".h5"
    return filename
def cached_path( url_or_filename , cache_dir=None , force_download=False , proxies=None , resume_download=False , user_agent=None , extract_compressed_file=False , force_extract=False , local_files_only=False , ):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename , Path ):
        url_or_filename = str(url_or_filename )
    if isinstance(cache_dir , Path ):
        cache_dir = str(cache_dir )
    if is_remote_url(url_or_filename ):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename , cache_dir=cache_dir , force_download=force_download , proxies=proxies , resume_download=resume_download , user_agent=user_agent , local_files_only=local_files_only , )
    elif os.path.exists(url_or_filename ):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename ).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError('file {} not found'.format(url_or_filename ) )
    else:
        # Something unknown
        raise ValueError('unable to parse {} as a URL or as a local path'.format(url_or_filename ) )
    if extract_compressed_file:
        if not is_zipfile(output_path ) and not tarfile.is_tarfile(output_path ):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir , output_file = os.path.split(output_path )
        output_extract_dir_name = output_file.replace('.' , '-' ) + "-extracted"
        output_path_extracted = os.path.join(output_dir , output_extract_dir_name )
        if os.path.isdir(output_path_extracted ) and os.listdir(output_path_extracted ) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path ):
            shutil.rmtree(output_path_extracted , ignore_errors=True )
            os.makedirs(output_path_extracted )
            if is_zipfile(output_path ):
                with ZipFile(output_path , 'r' ) as zip_file:
                    zip_file.extractall(output_path_extracted )
                    zip_file.close()
            elif tarfile.is_tarfile(output_path ):
                tar_file = tarfile.open(output_path )
                tar_file.extractall(output_path_extracted )
                tar_file.close()
            else:
                raise EnvironmentError('Archive format of {} could not be identified'.format(output_path ) )
        return output_path_extracted
    return output_path
def get_data( query , delim="," ):
    assert isinstance(query , str )
    if os.path.isfile(query ):
        with open(query ) as f:
            data = eval(f.read() )
    else:
        req = requests.get(query )
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
        assert data is not None, "could not connect"
        try:
            data = eval(data )
        except Exception:
            data = data.split('\n' )
        req.close()
    return data
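# Fetch an image over HTTP and decode it into a NumPy array via PIL.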
def get_image_from_url( url ):
    response = requests.get(url )
    img = np.array(Image.open(BytesIO(response.content ) ) )
    return img
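# Download a Detectron-style .pkl checkpoint and convert its NumPy weights to torch tensors,
# adding the 'num_batches_tracked' buffers that torch BatchNorm layers expect.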
def load_frcnn_pkl_from_url( url ):
    fn = url.split('/' )[-1]
    if fn not in os.listdir(os.getcwd() ):
        wget.download(url )
    with open(fn , 'rb' ) as stream:
        weights = pkl.load(stream )
    model = weights.pop('model' )
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v )
        if "running_var" in k:
            zero = torch.tensor([0] )
            k2 = k.replace('running_var' , 'num_batches_tracked' )
            new[k2] = zero
    return new
def get_demo_path( ):
    print(F"""{os.path.abspath(os.path.join(PATH , os.pardir ) )}/demo.ipynb""" )
def img_tensorize( im , input_format="RGB" ):
    assert isinstance(im , str )
    if os.path.isfile(im ):
        img = cv2.imread(im )
    else:
        img = get_image_from_url(im )
        assert img is not None, F"""could not connect to: {im}"""
    img = cv2.cvtColor(img , cv2.COLOR_BGR2RGB )
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk( images , batch=1 ):
    return (images[i : i + batch] for i in range(0 , len(images ) , batch ))
| 40
|
"""simple docstring"""
def perfect_cube( n : int ):
    '''simple docstring'''
    val = n ** (1 / 3)
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(2_7))
print(perfect_cube(4))
| 595
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)
BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''bigscience/bloom''': '''https://huggingface.co/bigscience/bloom/resolve/main/config.json''',
'''bigscience/bloom-560m''': '''https://huggingface.co/bigscience/bloom-560m/blob/main/config.json''',
'''bigscience/bloom-1b1''': '''https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json''',
'''bigscience/bloom-1b7''': '''https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json''',
'''bigscience/bloom-3b''': '''https://huggingface.co/bigscience/bloom-3b/blob/main/config.json''',
'''bigscience/bloom-7b1''': '''https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json''',
}
class BloomConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = 'bloom'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_hidden_layers': 'n_layer',
        'num_attention_heads': 'n_head',
    }
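    # NOTE: these defaults describe a tiny test-sized model; real BLOOM checkpoints override them via config.json.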
    def __init__( self , vocab_size=250880 , hidden_size=64 , n_layer=2 , n_head=8 , layer_norm_epsilon=1E-5 , initializer_range=0.02 , use_cache=True , bos_token_id=1 , eos_token_id=2 , apply_residual_connection_post_layernorm=False , hidden_dropout=0.0 , attention_dropout=0.0 , pretraining_tp=1 , slow_but_exact=False , **kwargs , ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('n_embed' , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
class BloomOnnxConfig(OnnxConfigWithPast ):
    '''simple docstring'''
    torch_onnx_minimum_version = version.parse('1.12' )
    def __init__( self , config , task = "default" , patching_specs = None , use_past = False , ):
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , 'pad_token_id' , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self ):
        common_inputs = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs , direction='inputs' , inverted_values_shape=True )
            common_inputs['attention_mask'] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs['attention_mask'] = {0: 'batch', 1: 'sequence'}
        return common_inputs
    @property
    def num_layers( self ):
        return self._config.n_layer
    @property
    def num_attention_heads( self ):
        return self._config.n_head
    @property
    def atol_for_validation( self ):
        return 1E-3
    def generate_dummy_inputs( self , tokenizer , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , ):
        common_inputs = super(BloomOnnxConfig , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({'input_ids': common_inputs['input_ids']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
            else:
                import torch
                batch , seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs['past_key_values'] = [
                    (torch.zeros(past_key_shape ), torch.zeros(past_value_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs['attention_mask'] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs['attention_mask'] = torch.cat(
                [ordered_inputs['attention_mask'], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset( self ):
        return 13
| 714
|
"""simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
RANDOM_BERT = '''hf-internal-testing/tiny-random-bert'''
CACHE_DIR = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
FULL_COMMIT_HASH = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class GetFromCacheTests(unittest.TestCase ):
    def test_cached_file(self ):
        archive_file = cached_file(RANDOM_BERT , CONFIG_NAME )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(CACHE_DIR ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(CACHE_DIR , subfolder ) ) )
        with open(os.path.join(CACHE_DIR , 'refs' , 'main' ) ) as f:
            main_commit = f.read()
        self.assertEqual(archive_file , os.path.join(CACHE_DIR , 'snapshots' , main_commit , CONFIG_NAME ) )
        self.assertTrue(os.path.isfile(archive_file ) )
        # File is cached at the same place the second time.
        new_archive_file = cached_file(RANDOM_BERT , CONFIG_NAME )
        self.assertEqual(archive_file , new_archive_file )
        # Using a specific revision to test the full commit hash.
        archive_file = cached_file(RANDOM_BERT , CONFIG_NAME , revision='9b8c223' )
        self.assertEqual(archive_file , os.path.join(CACHE_DIR , 'snapshots' , FULL_COMMIT_HASH , CONFIG_NAME ) )
    def test_cached_file_errors(self ):
        with self.assertRaisesRegex(EnvironmentError , 'is not a valid model identifier' ):
            _ = cached_file('tiny-random-bert' , CONFIG_NAME )
        with self.assertRaisesRegex(EnvironmentError , 'is not a valid git identifier' ):
            _ = cached_file(RANDOM_BERT , CONFIG_NAME , revision='aaaa' )
        with self.assertRaisesRegex(EnvironmentError , 'does not appear to have a file named' ):
            _ = cached_file(RANDOM_BERT , 'conf' )
    def test_non_existence_is_cached(self ):
        with self.assertRaisesRegex(EnvironmentError , 'does not appear to have a file named' ):
            _ = cached_file(RANDOM_BERT , 'conf' )
        with open(os.path.join(CACHE_DIR , 'refs' , 'main' ) ) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR , '.no_exist' , main_commit , 'conf' ) ) )
        path = cached_file(RANDOM_BERT , 'conf' , _raise_exceptions_for_missing_entries=False )
        self.assertIsNone(path )
        path = cached_file(RANDOM_BERT , 'conf' , local_files_only=True , _raise_exceptions_for_missing_entries=False )
        self.assertIsNone(path )
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request' , return_value=response_mock ) as mock_head:
            path = cached_file(RANDOM_BERT , 'conf' , _raise_exceptions_for_connection_errors=False )
            self.assertIsNone(path )
        # This check we did call the fake head request
        mock_head.assert_called()
    def test_has_file(self ):
        self.assertTrue(has_file('hf-internal-testing/tiny-bert-pt-only' , WEIGHTS_NAME ) )
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , TF2_WEIGHTS_NAME ) )
        self.assertFalse(has_file('hf-internal-testing/tiny-bert-pt-only' , FLAX_WEIGHTS_NAME ) )
    def test_get_file_from_repo_distant(self ):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo('bert-base-cased' , 'ahah.txt' ) )
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError , 'is not a valid model identifier' ):
            get_file_from_repo('bert-base-case' , CONFIG_NAME )
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError , 'is not a valid git identifier' ):
            get_file_from_repo('bert-base-cased' , CONFIG_NAME , revision='ahaha' )
        resolved_file = get_file_from_repo('bert-base-cased' , CONFIG_NAME )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file , 'r' ).read() )
        self.assertEqual(config['hidden_size'] , 768 )
    def test_get_file_from_repo_local(self ):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir ) / 'a.txt'
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir , 'a.txt' ) , str(filename ) )
            self.assertIsNone(get_file_from_repo(tmp_dir , 'b.txt' ) )
| 263
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_squeezebert_fast'] = ['SqueezeBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_squeezebert'] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 334
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {'LayoutLMv2Config', 'LayoutLMv3Config'}
@is_pipeline_test
class ZeroShotClassificationPipelineTests(unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
    def get_test_pipeline(self , model , tokenizer , processor ):
        classifier = ZeroShotClassificationPipeline(
            model=model , tokenizer=tokenizer , candidate_labels=["polics", "health"] )
        return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
    def run_pipeline_test(self , classifier , _ ):
        outputs = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
        self.assertEqual(outputs , {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )
        # No kwarg
        outputs = classifier("Who are you voting for in 2020?" , ["politics"] )
        self.assertEqual(outputs , {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )
        outputs = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
        self.assertEqual(outputs , {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )
        outputs = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
        self.assertEqual(
            outputs , {"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
        outputs = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
        self.assertEqual(
            outputs , {"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]} )
        self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
        outputs = classifier(
            "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
        self.assertEqual(outputs , {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )
        # https://github.com/huggingface/transformers/issues/13846
        outputs = classifier(["I am happy"] , ["positive", "negative"] )
        self.assertEqual(
            outputs , [
                {"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]}
                for i in range(1 )
            ] , )
        outputs = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
        self.assertEqual(
            outputs , [
                {"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]}
                for i in range(2 )
            ] , )
        with self.assertRaises(ValueError ):
            classifier("" , candidate_labels="politics" )
        with self.assertRaises(TypeError ):
            classifier(None , candidate_labels="politics" )
        with self.assertRaises(ValueError ):
            classifier("Who are you voting for in 2020?" , candidate_labels="" )
        with self.assertRaises(TypeError ):
            classifier("Who are you voting for in 2020?" , candidate_labels=None )
        with self.assertRaises(ValueError ):
            classifier(
                "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
        with self.assertRaises(AttributeError ):
            classifier(
                "Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=None , )
        self.run_entailment_id(classifier )
    def run_entailment_id(self , zero_shot_classifier: Pipeline ):
        config = zero_shot_classifier.model.config
        original_label2id = config.label2id
        original_entailment = zero_shot_classifier.entailment_id
        config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , -1 )
        config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
        self.assertEqual(zero_shot_classifier.entailment_id , 0 )
        config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
        self.assertEqual(zero_shot_classifier.entailment_id , 2 )
        config.label2id = original_label2id
        self.assertEqual(original_entailment , zero_shot_classifier.entailment_id )
@require_torch
    def test_truncation(self ):
        zero_shot_classifier = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
        # There was a regression in 4.10 for this
        # Adding a test so we don't make the mistake again.
        # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
        zero_shot_classifier(
            "Who are you voting for in 2020?" * 100 , candidate_labels=["politics", "public health", "science"] )
@require_torch
    def test_small_model_pt(self ):
        zero_shot_classifier = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(outputs ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            } , )
@require_tf
    def test_small_model_tf(self ):
        zero_shot_classifier = pipeline(
            "zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(outputs ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["science", "public health", "politics"],
                "scores": [0.333, 0.333, 0.333],
            } , )
@slow
@require_torch
    def test_large_model_pt(self ):
        zero_shot_classifier = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
        self.assertEqual(
            nested_simplify(outputs ) , {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            } , )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" ,candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] ,multi_label=_UpperCamelCase ,)
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} ,)
    @slow
    @require_tf
    def test_large_model_tf(self):
        zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli", framework="tf")
        outputs = zero_shot_classifier(
            "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"])
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": "Who are you voting for in 2020?",
                "labels": ["politics", "public health", "science"],
                "scores": [0.976, 0.015, 0.009],
            },
        )
        outputs = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" ,candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] ,multi_label=_UpperCamelCase ,)
        self.assertEqual(
            nested_simplify(outputs),
            {
                "sequence": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} ,)
| 334
| 1
|
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency(inductance: float, capacitance: float) -> tuple[str, float]:
    """Compute the resonant frequency of an LC circuit: f = 1 / (2 * pi * sqrt(L * C))."""
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    elif capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    else:
        return (
            "Resonant frequency",
            float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
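    # Minimal usage sketch (component values are illustrative assumptions, not from
    # the source): for L = 10 H and C = 5 F, f = 1 / (2 * pi * sqrt(L * C)) ≈ 0.0225 Hz.
    label, frequency = resonant_frequency(inductance=10, capacitance=5)
    print(f"{label}: {frequency:.4f} Hz")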
| 700
|
"""simple docstring"""
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"pipelines_utils",
"0.22.0",
"Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.",
standard_warn=False,
stacklevel=3,
)
| 310
| 0
|
import base64


# NOTE: the obfuscated source only shows `baseaa.baaencode`; the standard-library
# base64 module is assumed here.
def base64_encode(string: str) -> bytes:
    """Encode a UTF-8 string to base64 bytes."""
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded_data: bytes) -> str:
    """Decode base64 bytes back to a UTF-8 string."""
    return base64.b64decode(encoded_data).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)

    decoded = base64_decode(encoded)
    print(decoded)
| 32
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert"] = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_albert"] = [
'''ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AlbertForMaskedLM''',
'''AlbertForMultipleChoice''',
'''AlbertForPreTraining''',
'''AlbertForQuestionAnswering''',
'''AlbertForSequenceClassification''',
'''AlbertForTokenClassification''',
'''AlbertModel''',
'''AlbertPreTrainedModel''',
'''load_tf_weights_in_albert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_albert"] = [
'''TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAlbertForMaskedLM''',
'''TFAlbertForMultipleChoice''',
'''TFAlbertForPreTraining''',
'''TFAlbertForQuestionAnswering''',
'''TFAlbertForSequenceClassification''',
'''TFAlbertForTokenClassification''',
'''TFAlbertMainLayer''',
'''TFAlbertModel''',
'''TFAlbertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_albert"] = [
'''FlaxAlbertForMaskedLM''',
'''FlaxAlbertForMultipleChoice''',
'''FlaxAlbertForPreTraining''',
'''FlaxAlbertForQuestionAnswering''',
'''FlaxAlbertForSequenceClassification''',
'''FlaxAlbertForTokenClassification''',
'''FlaxAlbertModel''',
'''FlaxAlbertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
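# Design note on the pattern above: `_import_structure` maps each submodule to the
# public names it defines, and the module object is swapped for a `_LazyModule` at
# import time, so heavy backends (torch/tf/flax) are only imported when one of
# their symbols is first accessed.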
| 354
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {"""openai-gpt""": """https://huggingface.co/openai-gpt/resolve/main/config.json"""}
class OpenAIGPTConfig(PretrainedConfig):
    """Configuration class for `openai-gpt` models."""

    model_type = "openai-gpt"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=40478,
        n_positions=512,
        n_embd=768,
        n_layer=12,
        n_head=12,
        afn="gelu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        summary_type="cls_index",
        summary_use_proj=True,
        summary_activation=None,
        summary_proj_to_labels=True,
        summary_first_dropout=0.1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
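# Minimal usage sketch (override value chosen for illustration): `attribute_map`
# aliases the generic config names onto the GPT-specific ones, so
#     config = OpenAIGPTConfig(n_layer=6)
#     assert config.num_hidden_layers == 6
# reads the same underlying field through both spellings.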
| 710
|
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
lowercase__ = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
'''simple docstring'''
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
'''simple docstring'''
@require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}")
            return {"success": success}
        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 492
| 0
|
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: Optional[str] = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
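# A minimal sketch (hypothetical subclass, not part of the source) of a concrete
# reader built on AbstractDatasetReader: store the constructor arguments, then
# delegate `read` to the Dataset constructor that matches the on-disk format.
class JsonDatasetReader(AbstractDatasetReader):
    def read(self):
        return Dataset.from_json(self.path_or_paths, features=self.features, cache_dir=self.cache_dir)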
| 185
|
def binary_insertion_sort(collection):
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary-search for the insertion point of `val` in the sorted prefix.
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift elements right and drop `val` into its slot.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
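# Note: the binary search above finds each insertion point in O(log i)
# comparisons, but the element shifts keep the overall worst case at O(n^2) moves.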
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
| 380
| 0
|
"""simple docstring"""
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }
    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]
        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)
    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)
        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
| 281
|
"""simple docstring"""
from __future__ import annotations
def all_construct(target: str, word_bank: list[str] | None = None) -> list[list[str]]:
    """Return every way `target` can be constructed by concatenating words from `word_bank`."""
    word_bank = word_bank or []
    # create a table
    table_size: int = len(target) + 1
    table: list[list[list[str]]] = []
    for _ in range(table_size):
        table.append([])
    # seed value
    table[0] = [[]]  # because empty string has empty combination
    # iterate through the indices
    for i in range(table_size):
        # condition
        if table[i] != []:
            for word in word_bank:
                # slice condition
                if target[i : i + len(word)] == word:
                    new_combinations: list[list[str]] = [
                        [word, *way] for way in table[i]
                    ]
                    # adds the word to every combination the current position holds
                    # now, push that combination to the table[i+len(word)]
                    table[i + len(word)] += new_combinations
    # combinations are in reverse order so reverse for better output
    for combination in table[len(target)]:
        combination.reverse()
    return table[len(target)]
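# Worked example (illustrative, not from the source):
# all_construct("abc", ["a", "b", "c", "ab"]) returns [["ab", "c"], ["a", "b", "c"]].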
if __name__ == "__main__":
print(all_construct('jwajalapa', ['jwa', 'j', 'w', 'a', 'la', 'lapa']))
print(all_construct('rajamati', ['s', 'raj', 'amat', 'raja', 'ma', 'i', 't']))
print(
all_construct(
'hexagonosaurus',
['h', 'ex', 'hex', 'ag', 'ago', 'ru', 'auru', 'rus', 'go', 'no', 'o', 's'],
)
)
| 281
| 1
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size

    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
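# Semantics under test, with illustrative numbers: if IN_MEMORY_MAX_SIZE is
# 500 MiB, a 400 MiB dataset counts as "small" (kept in memory) while a 600 MiB
# one does not; with the default max size of 0, nothing is kept in memory.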
| 158
|
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCAmelCase_ = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
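# Illustrative trace (assumed values): a 480x640 input targeted at 384x384 with
# keep_aspect_ratio=True scales both sides by the factor closer to 1
# (384/480 = 0.8 rather than 384/640 = 0.6), and multiple=32 then snaps
# 0.8*480 = 384 and 0.8*640 = 512 to the final output size (384, 512).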
class DPTImageProcessor(BaseImageProcessor):
    """Constructs a DPT image processor."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean=None,
        image_std=None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
| 173
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_informer''': [
'''INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
'''INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InformerForPrediction''',
'''InformerModel''',
'''InformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 707
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    EulerAncestralDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionInstructPix2PixPipeline,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionInstructPix2PixPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInstructPix2PixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32)
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "image_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_multiple_init_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = [inputs["prompt"]] * 2

        image = np.array(inputs["image"]).astype(np.float32) / 255.0
        image = torch.from_numpy(image).unsqueeze(0).to(device)
        image = image / 2 + 0.5
        image = image.permute(0, 3, 1, 2)
        inputs["image"] = image.repeat(2, 1, 1, 1)

        image = sd_pipe(**inputs).images
        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 32, 32, 3)
        expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
        sd_pipe = StableDiffusionInstructPix2PixPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(",".join([str(x) for x in slice]))

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
    def test_latents_input(self):
        components = self.get_dummy_components()
        pipe = StableDiffusionInstructPix2PixPipeline(**components)
        pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0]

        vae = components["vae"]
        inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt")

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode()

        out_latents_inputs = pipe(**inputs)[0]

        max_diff = np.abs(out - out_latents_inputs).max()
        self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image")
@slow
@require_torch_gpu
class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg")
        inputs = {
            "prompt": "turn him into a cyborg",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "image_guidance_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_pix2pix_default(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_k_lms(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_ddim(self):
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None)
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_pix2pix_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_pix2pix_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9
    def test_stable_diffusion_pix2pix_odd_resolution(self):
        inputs = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        inputs["image"] = inputs["image"].resize((504, 504))

        model_id = "timbrooks/instruct-pix2pix"
        pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
            model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        output = pipe(**inputs)
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
| 149
| 0
|
import unittest
from transformers.testing_utils import require_bs4
from transformers.utils import is_bs4_available
from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin
if is_bs4_available():
from transformers import MarkupLMFeatureExtractor
class MarkupLMFeatureExtractionTester(unittest.TestCase):
    def __init__(self, parent):
        self.parent = parent

    def prepare_feat_extract_dict(self):
        return {}
def get_html_strings():
    html_string_1 = '''<HTML>
<HEAD>
<TITLE>sample document</TITLE>
</HEAD>
<BODY BGCOLOR="FFFFFF">
<HR>
<a href="http://google.com">Goog</a>
<H1>This is one header</H1>
<H2>This is a another Header</H2>
<P>Travel from
<P>
<B>SFO to JFK</B>
<BR>
<B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>
<HR>
<div style="color:#0000FF">
<h3>Traveler <b> name </b> is
<p> John Doe </p>
</div>'''
    html_string_2 = '''
<!DOCTYPE html>
<html>
<body>
<h1>My First Heading</h1>
<p>My first paragraph.</p>
</body>
</html>
'''
    return [html_string_1, html_string_2]
@require_bs4
class MarkupLMFeatureExtractionTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
    feature_extraction_class = MarkupLMFeatureExtractor if is_bs4_available() else None

    def setUp(self):
        self.feature_extract_tester = MarkupLMFeatureExtractionTester(self)

    @property
    def feat_extract_dict(self):
        return self.feature_extract_tester.prepare_feat_extract_dict()
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class()

        # Test not batched input
        html_string = get_html_strings()[0]
        encoding = feature_extractor(html_string)

        # fmt: off
        expected_nodes = [['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']]
        expected_xpaths = [['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']]
        # fmt: on

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)

        # Test batched
        html_strings = get_html_strings()
        encoding = feature_extractor(html_strings)

        # fmt: off
        expected_nodes = expected_nodes + [['My First Heading', 'My first paragraph.']]
        expected_xpaths = expected_xpaths + [['/html/body/h1', '/html/body/p']]
        # fmt: on

        self.assertEqual(len(encoding.nodes), 2)
        self.assertEqual(len(encoding.xpaths), 2)

        self.assertEqual(encoding.nodes, expected_nodes)
        self.assertEqual(encoding.xpaths, expected_xpaths)
| 100
|
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
_A : List[str] = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"])
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    config.id2label = {int(k): v for k, v in id2label.items()}

    return config
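# Illustrative lookup (model name assumed for the example):
# get_maskformer_config("maskformer-swin-tiny-ade") takes the "ade" branch above,
# i.e. num_labels=150 with the "ade20k-id2label.json" label map.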
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.norm2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.layers.{i}.downsample.reduction.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.weight''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.layers.{i}.downsample.norm.bias''', f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''model.pixel_level_module.encoder.hidden_states_norms.{i}.bias''') )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight''') )
rename_keys.append((f'''sem_seg_head.adapter_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.weight''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight''') )
rename_keys.append((f'''sem_seg_head.layer_{source_index}.norm.bias''', f'''model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias''') )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias''') )
# cross-attention out projection
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias''') )
# MLP 1
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc1.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc1.bias''') )
# MLP 2
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight''', f'''model.transformer_module.decoder.layers.{idx}.fc2.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias''', f'''model.transformer_module.decoder.layers.{idx}.fc2.bias''') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias''', f'''model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias''') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias''', f'''model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias''') )
# layernorm 3 (final layernorm)
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias''', f'''model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.weight''', f'''mask_embedder.{i}.0.weight''') )
rename_keys.append((f'''sem_seg_head.predictor.mask_embed.layers.{i}.bias''', f'''mask_embedder.{i}.0.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new) -> None:
    val = dct.pop(old)
    dct[new] = val
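# Usage sketch for rename_key (the key names here are made up): popping the old
# key and re-inserting under the new one moves the tensor without copying it.
#
#     demo = {"old.key": torch.zeros(1)}
#     rename_key(demo, "old.key", "new.key")
#     assert "new.key" in demo and "old.key" not in demo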
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.weight''')
            in_proj_bias = state_dict.pop(f'''backbone.layers.{i}.blocks.{j}.attn.qkv.bias''')
            # next, add query, keys and values (in that order) to the state dict
            prefix = f'''model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self'''
            state_dict[f'''{prefix}.query.weight'''] = in_proj_weight[:dim, :]
            state_dict[f'''{prefix}.query.bias'''] = in_proj_bias[:dim]
            state_dict[f'''{prefix}.key.weight'''] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'''{prefix}.key.bias'''] = in_proj_bias[dim : dim * 2]
            state_dict[f'''{prefix}.value.weight'''] = in_proj_weight[-dim:, :]
            state_dict[f'''{prefix}.value.bias'''] = in_proj_bias[-dim:]
            # fmt: on
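# Sketch of the split performed above (sizes are illustrative): the original
# Swin checkpoint fuses query/key/value into a single (3 * dim, dim) matrix;
# rows [0:dim], [dim:2*dim] and [2*dim:3*dim] are q, k and v respectively, so
# slicing and re-concatenating must round-trip.
def _demo_qkv_split(dim: int = 4) -> None:
    fused = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert torch.equal(torch.cat([q, k, v], dim=0), fused)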
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        prefix = f'''model.transformer_module.decoder.layers.{idx}.self_attn'''
        state_dict[f'''{prefix}.q_proj.weight'''] = in_proj_weight[:hidden_size, :]
        state_dict[f'''{prefix}.q_proj.bias'''] = in_proj_bias[:hidden_size]
        state_dict[f'''{prefix}.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'''{prefix}.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''{prefix}.v_proj.weight'''] = in_proj_weight[-hidden_size:, :]
        state_dict[f'''{prefix}.v_proj.bias'''] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight''')
        in_proj_bias = state_dict.pop(f'''sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias''')
        # next, add query, keys and values (in that order) to the state dict
        prefix = f'''model.transformer_module.decoder.layers.{idx}.encoder_attn'''
        state_dict[f'''{prefix}.q_proj.weight'''] = in_proj_weight[:hidden_size, :]
        state_dict[f'''{prefix}.q_proj.bias'''] = in_proj_bias[:hidden_size]
        state_dict[f'''{prefix}.k_proj.weight'''] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'''{prefix}.k_proj.bias'''] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'''{prefix}.v_proj.weight'''] = in_proj_weight[-hidden_size:, :]
        state_dict[f'''{prefix}.v_proj.bias'''] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img() -> Image.Image:
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, '''rb''') as f:
        data = pickle.load(f)
    state_dict = data['''model''']

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f'''Unexpected keys: {unexpected_keys}'''

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if '''ade''' in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors='''pt''')
    outputs = model(**inputs)

    print('''Logits:''', outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]])
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print('''Looks ok!''')

    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and image processor to {pytorch_dump_folder_path}''')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print('''Pushing model and image processor to the hub...''')
        model.push_to_hub(f'''nielsr/{model_name}''')
        image_processor.push_to_hub(f'''nielsr/{model_name}''')
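# Example invocation (the script name and paths are placeholders):
#
#     python convert_maskformer_checkpoint.py \
#         --model_name maskformer-swin-tiny-ade \
#         --checkpoint_path /path/to/model.pkl \
#         --pytorch_dump_folder_path ./maskformer-swin-tiny-ade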
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        """--model_name""",
        default="""maskformer-swin-tiny-ade""",
        type=str,
        help="""Name of the MaskFormer model you'd like to convert""",
    )
parser.add_argument(
"""--checkpoint_path""",
default="""/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl""",
type=str,
help="""Path to the original state dict (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 100
| 1
|
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """
    Sieve of Eratosthenes: return all primes up to and including num.

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # every multiple of p starting at p * p is composite
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input('Enter a positive integer: ').strip())
print(prime_sieve_eratosthenes(user_num))
| 700
|
import argparse
CUSTOM_JS_FILE = 'docs/source/_static/js/custom.js'
def update_custom_js(version: str) -> None:
    """Update the stable version and the version table in the docs' custom.js."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'''const stableVersion = "v{version}"\n'''

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'''    "v{version}": "v{version}",\n'''

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
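# Sketch of the custom.js fragment this function expects (assumed layout; only
# the `const stableVersion =` and `const versionMapping = {` markers and the
# closing `}` are actually required by the parsing loops above):
#
#     const stableVersion = "v4.25.0"
#     const versionMapping = {
#         "": "v4.25.0 (stable)",
#         "v4.24.0": "v4.24.0",
#     }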
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--version', help='Release version.')
    args = parser.parse_args()
update_custom_js(args.version)
| 214
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '\nHuman: <<task>>\n\nAssistant: '
DEFAULT_PROMPTS_REPO = 'huggingface-tools/default-prompts'
PROMPT_FILES = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo, or returns the prompt itself when it is not a repo id.
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("""\\s""", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="""dataset""", user_agent={"""agent""": agent_name})
    with open(prompt_file, """r""", encoding="""utf-8""") as f:
        return f.read()
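# Usage sketch (agent name is a placeholder): passing None falls back to the
# default prompts repo, a bare repo id downloads the template for `mode`, and
# anything containing whitespace is returned verbatim as a custom prompt.
#
#     chat_template = download_prompt(None, "my-agent", mode="chat")
#     custom = download_prompt("Answer the question: <<task>>", "my-agent")
#     assert custom.startswith("Answer")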
| 99
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mmbt'] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 484
| 0
|
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """
    Helper function to decode raw audio bytes into a mono float32 waveform via ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
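# Usage sketch (file path is a placeholder): decode an arbitrary audio file to
# a mono float32 waveform at 16 kHz by piping its raw bytes through ffmpeg.
#
#     with open("sample.flac", "rb") as f:
#         waveform = ffmpeg_read(f.read(), sampling_rate=16_000)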
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to read raw microphone data via ffmpeg, yielding chunks of bytes.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to read audio from the microphone in overlapping, strided chunks.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Reads raw bytes from an iterator and yields chunks of length `chunk_len`, overlapping by `stride`
    on the left and right. `stream` additionally yields partial results before a full chunk is available.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}")
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
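# Minimal sketch of the striding above: with chunk_len=4 and stride=(1, 1),
# consecutive chunks overlap by stride_left + stride_right = 2 bytes, and the
# tail is flushed once it exceeds the left stride.
def _demo_chunk_bytes_iter() -> None:
    items = list(chunk_bytes_iter(iter([b"abcdefgh"]), 4, stride=(1, 1)))
    assert [i["raw"] for i in items] == [b"abcd", b"cdef", b"efgh", b"gh"]
    assert [i["stride"] for i in items] == [(0, 1), (1, 1), (1, 1), (1, 0)]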
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """
    Internal function to create the generator of data through ffmpeg.
    """
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 718
|
def solution(n: int = 2_000_000) -> int:
    """
    Returns the sum of all the primes below n (Project Euler problem 10).

    >>> solution(1_000)
    76127
    """
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes
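# The sieve above runs in O(n log log n) time and O(n) memory. A trial-division
# cross-check for small n (illustrative only, far too slow for n = 2_000_000;
# both give 1060 for n = 100):
def _naive_solution(n: int = 100) -> int:
    return sum(i for i in range(2, n) if all(i % d for d in range(2, int(i**0.5) + 1)))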
if __name__ == "__main__":
print(F'''{solution() = }''')
| 387
| 0
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = """\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
_DESCRIPTION = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
_KWARGS_DESCRIPTION = """
Calculates how good predictions are, given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{'pass@1': 0.5, 'pass@2': 1.0}
"""
_WARNING = """
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can do this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
_LICENSE = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
        """Returns the MetricInfo for this metric (features, citation, homepage)."""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the pass@k scores and the granular per-test results of the predictions."""
        if os.getenv("""HF_ALLOW_CODE_EVAL""", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("""This metric is currently not supported on Windows.""")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + """\n""" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["""completion_id"""], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["""passed"""] for r in result]
            total.append(len(result))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {F'pass@{k}': estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
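# Worked example of the unbiased estimator above: with n=5 samples of which c=2
# pass, pass@2 = 1 - C(3, 2) / C(5, 2) = 1 - 3/10 = 0.7.
#
#     >>> float(estimate_pass_at_k(np.array([5]), np.array([2]), 2)[0])
#     0.7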
| 80
|
'''simple docstring'''
def sum_of_digits(n: int) -> int:
    """
    Find the sum of digits of a number.

    >>> sum_of_digits(12345)
    15
    """
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """
    Find the sum of digits of a number using recursion.

    >>> sum_of_digits_recursion(12345)
    15
    """
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """
    Find the sum of digits of a number.

    >>> sum_of_digits_compact(12345)
    15
    """
    return sum(int(c) for c in str(abs(n)))
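# The three implementations above are interchangeable; a quick equivalence
# sketch over a few values (illustrative only):
def _demo_digit_sums() -> None:
    for n in (0, 7, 12345, -9876):
        assert sum_of_digits(n) == sum_of_digits_recursion(n) == sum_of_digits_compact(n)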
def benchmark() -> None:
    """
    Benchmark the three functions above, with three different-length int values.
    """
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f'''{func.__name__}({value})'''
        timing = timeit(f'''__main__.{call}''', setup='''import __main__''')
        print(f'''{call:56} = {func(value)} -- {timing:.4f} seconds''')

    for value in (262144, 1125899906842624, 126765060022822940149670320537600):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 24
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_vit''': ['''VIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTConfig''', '''ViTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_vit'''] = ['''ViTFeatureExtractor''']
    _import_structure['''image_processing_vit'''] = ['''ViTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vit'''] = [
'''VIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTForImageClassification''',
'''ViTForMaskedImageModeling''',
'''ViTModel''',
'''ViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vit'''] = [
'''TFViTForImageClassification''',
'''TFViTModel''',
'''TFViTPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_vit'''] = [
'''FlaxViTForImageClassification''',
'''FlaxViTModel''',
'''FlaxViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 225
|
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of softmax(x) from the pre-softmax logits `x`."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
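# Sanity-check sketch for the closed form above: log(sum(exp(x))) minus
# sum(x * exp(x)) / sum(exp(x)) is exactly the Shannon entropy of softmax(x),
# so it should match torch's Categorical distribution.
def _demo_entropy() -> None:
    logits = torch.tensor([[1.0, 2.0, 3.0]])
    expected = torch.distributions.Categorical(logits=logits).entropy()
    assert torch.allclose(entropy(logits), expected)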
class DeeBertEncoder(nn.Module):
def __init__( self : str , lowercase__ : List[str] ):
super().__init__()
_lowerCAmelCase = config.output_attentions
_lowerCAmelCase = config.output_hidden_states
_lowerCAmelCase = nn.ModuleList([BertLayer(lowercase__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase = nn.ModuleList([BertHighway(lowercase__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase = [-1 for _ in range(config.num_hidden_layers )]
def SCREAMING_SNAKE_CASE__ ( self : int , lowercase__ : Any ):
if (type(lowercase__ ) is float) or (type(lowercase__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowerCAmelCase = x
else:
_lowerCAmelCase = x
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , lowercase__ : str ):
_lowerCAmelCase = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , lowercase__ : Any , lowercase__ : Optional[Any]=None , lowercase__ : List[str]=None , lowercase__ : str=None , lowercase__ : Optional[Any]=None , ):
_lowerCAmelCase = ()
_lowerCAmelCase = ()
_lowerCAmelCase = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowerCAmelCase = all_hidden_states + (hidden_states,)
_lowerCAmelCase = layer_module(
lowercase__ , lowercase__ , head_mask[i] , lowercase__ , lowercase__ )
_lowerCAmelCase = layer_outputs[0]
if self.output_attentions:
_lowerCAmelCase = all_attentions + (layer_outputs[1],)
_lowerCAmelCase = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase = current_outputs + (all_attentions,)
_lowerCAmelCase = self.highway[i](lowercase__ )
# logits, pooled_output
if not self.training:
_lowerCAmelCase = highway_exit[0]
_lowerCAmelCase = entropy(lowercase__ )
_lowerCAmelCase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowerCAmelCase = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowerCAmelCase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(lowercase__ , i + 1 )
else:
_lowerCAmelCase = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowerCAmelCase = all_hidden_states + (hidden_states,)
_lowerCAmelCase = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase = outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase = outputs + (all_attentions,)
_lowerCAmelCase = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
def __init__( self : Optional[int] , lowercase__ : List[Any] ):
super().__init__(lowercase__ )
_lowerCAmelCase = config
_lowerCAmelCase = BertEmbeddings(lowercase__ )
_lowerCAmelCase = DeeBertEncoder(lowercase__ )
_lowerCAmelCase = BertPooler(lowercase__ )
self.init_weights()
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
self.encoder.init_highway_pooler(self.pooler )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
return self.embeddings.word_embeddings
def SCREAMING_SNAKE_CASE__ ( self : int , lowercase__ : List[Any] ):
_lowerCAmelCase = value
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , lowercase__ : List[str] ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(lowercase__ )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def SCREAMING_SNAKE_CASE__ ( self : int , lowercase__ : int=None , lowercase__ : Union[str, Any]=None , lowercase__ : str=None , lowercase__ : Any=None , lowercase__ : int=None , lowercase__ : Optional[int]=None , lowercase__ : Any=None , lowercase__ : int=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
_lowerCAmelCase = input_ids.size()
elif inputs_embeds is not None:
_lowerCAmelCase = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
_lowerCAmelCase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCAmelCase = torch.ones(lowercase__ , device=lowercase__ )
if encoder_attention_mask is None:
_lowerCAmelCase = torch.ones(lowercase__ , device=lowercase__ )
if token_type_ids is None:
_lowerCAmelCase = torch.zeros(lowercase__ , dtype=torch.long , device=lowercase__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCAmelCase = self.get_extended_attention_mask(lowercase__ , lowercase__ , lowercase__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowerCAmelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowerCAmelCase = encoder_attention_mask[:, None, None, :]
_lowerCAmelCase = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowerCAmelCase = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCAmelCase = self.get_head_mask(lowercase__ , self.config.num_hidden_layers )
_lowerCAmelCase = self.embeddings(
input_ids=lowercase__ , position_ids=lowercase__ , token_type_ids=lowercase__ , inputs_embeds=lowercase__ )
_lowerCAmelCase = self.encoder(
lowercase__ , attention_mask=lowercase__ , head_mask=lowercase__ , encoder_hidden_states=lowercase__ , encoder_attention_mask=lowercase__ , )
_lowerCAmelCase = encoder_outputs[0]
_lowerCAmelCase = self.pooler(lowercase__ )
_lowerCAmelCase = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
def __init__( self : int , lowercase__ : Optional[Any] ):
super().__init__()
_lowerCAmelCase = BertPooler(lowercase__ )
_lowerCAmelCase = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase = nn.Linear(config.hidden_size , config.num_labels )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , lowercase__ : Dict ):
# Pooler
_lowerCAmelCase = encoder_outputs[0]
_lowerCAmelCase = self.pooler(lowercase__ )
# "return" pooler_output
# BertModel
_lowerCAmelCase = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowerCAmelCase = bmodel_output[1]
_lowerCAmelCase = self.dropout(lowercase__ )
_lowerCAmelCase = self.classifier(lowercase__ )
return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n    also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
def __init__( self : Union[str, Any] , lowercase__ : Any ):
super().__init__(lowercase__ )
_lowerCAmelCase = config.num_labels
_lowerCAmelCase = config.num_hidden_layers
_lowerCAmelCase = DeeBertModel(lowercase__ )
_lowerCAmelCase = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def SCREAMING_SNAKE_CASE__ ( self : Dict , lowercase__ : Dict=None , lowercase__ : int=None , lowercase__ : Union[str, Any]=None , lowercase__ : Optional[Any]=None , lowercase__ : List[Any]=None , lowercase__ : Optional[Any]=None , lowercase__ : Tuple=None , lowercase__ : Optional[int]=-1 , lowercase__ : Optional[int]=False , ):
_lowerCAmelCase = self.num_layers
try:
_lowerCAmelCase = self.bert(
lowercase__ , attention_mask=lowercase__ , token_type_ids=lowercase__ , position_ids=lowercase__ , head_mask=lowercase__ , inputs_embeds=lowercase__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowerCAmelCase = outputs[1]
_lowerCAmelCase = self.dropout(lowercase__ )
_lowerCAmelCase = self.classifier(lowercase__ )
_lowerCAmelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowerCAmelCase = e.message
_lowerCAmelCase = e.exit_layer
_lowerCAmelCase = outputs[0]
if not self.training:
_lowerCAmelCase = entropy(lowercase__ )
_lowerCAmelCase = []
_lowerCAmelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase = MSELoss()
_lowerCAmelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase = CrossEntropyLoss()
_lowerCAmelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowerCAmelCase = []
for highway_exit in outputs[-1]:
_lowerCAmelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(lowercase__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase = MSELoss()
_lowerCAmelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase = CrossEntropyLoss()
_lowerCAmelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(lowercase__ )
if train_highway:
_lowerCAmelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowerCAmelCase = (loss,) + outputs
if not self.training:
_lowerCAmelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowerCAmelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 225
| 1
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase :Tuple = logging.get_logger(__name__)
_lowerCAmelCase :Union[str, Any] = {
"""facebook/wav2vec2-base-960h""": """https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json""",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class Wav2Vec2Config(PretrainedConfig):
    """Configuration class to store the configuration of a Wav2Vec2 model."""

    model_type = "wav2vec2"
def __init__( self , lowercase__=32 , lowercase__=768 , lowercase__=12 , lowercase__=12 , lowercase__=3_072 , lowercase__="gelu" , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0 , lowercase__=0.0 , lowercase__=0.1 , lowercase__=0.1 , lowercase__=0.0_2 , lowercase__=1E-5 , lowercase__="group" , lowercase__="gelu" , lowercase__=(512, 512, 512, 512, 512, 512, 512) , lowercase__=(5, 2, 2, 2, 2, 2, 2) , lowercase__=(10, 3, 3, 3, 3, 2, 2) , lowercase__=False , lowercase__=128 , lowercase__=16 , lowercase__=False , lowercase__=True , lowercase__=0.0_5 , lowercase__=10 , lowercase__=2 , lowercase__=0.0 , lowercase__=10 , lowercase__=0 , lowercase__=320 , lowercase__=2 , lowercase__=0.1 , lowercase__=100 , lowercase__=256 , lowercase__=256 , lowercase__=0.1 , lowercase__="sum" , lowercase__=False , lowercase__=False , lowercase__=256 , lowercase__=(512, 512, 512, 512, 1_500) , lowercase__=(5, 3, 3, 1, 1) , lowercase__=(1, 2, 3, 1, 1) , lowercase__=512 , lowercase__=0 , lowercase__=1 , lowercase__=2 , lowercase__=False , lowercase__=3 , lowercase__=2 , lowercase__=3 , lowercase__=None , lowercase__=None , **lowercase__ , ) -> str:
super().__init__(**lowercase__ , pad_token_id=lowercase__ , bos_token_id=lowercase__ , eos_token_id=lowercase__ )
SCREAMING_SNAKE_CASE : Dict = hidden_size
SCREAMING_SNAKE_CASE : Optional[Any] = feat_extract_norm
SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_activation
SCREAMING_SNAKE_CASE : Dict = list(lowercase__ )
SCREAMING_SNAKE_CASE : Tuple = list(lowercase__ )
SCREAMING_SNAKE_CASE : List[Any] = list(lowercase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = conv_bias
SCREAMING_SNAKE_CASE : Optional[Any] = num_conv_pos_embeddings
SCREAMING_SNAKE_CASE : List[Any] = num_conv_pos_embedding_groups
SCREAMING_SNAKE_CASE : int = len(self.conv_dim )
SCREAMING_SNAKE_CASE : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE : List[str] = intermediate_size
SCREAMING_SNAKE_CASE : Tuple = hidden_act
SCREAMING_SNAKE_CASE : Dict = num_attention_heads
SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout
SCREAMING_SNAKE_CASE : Any = attention_dropout
SCREAMING_SNAKE_CASE : int = activation_dropout
SCREAMING_SNAKE_CASE : Optional[Any] = feat_proj_dropout
SCREAMING_SNAKE_CASE : Any = final_dropout
SCREAMING_SNAKE_CASE : Tuple = layerdrop
SCREAMING_SNAKE_CASE : Any = layer_norm_eps
SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : List[str] = do_stable_layer_norm
SCREAMING_SNAKE_CASE : List[Any] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
SCREAMING_SNAKE_CASE : Optional[Any] = apply_spec_augment
SCREAMING_SNAKE_CASE : Any = mask_time_prob
SCREAMING_SNAKE_CASE : str = mask_time_length
SCREAMING_SNAKE_CASE : Optional[int] = mask_time_min_masks
SCREAMING_SNAKE_CASE : Tuple = mask_feature_prob
SCREAMING_SNAKE_CASE : Optional[Any] = mask_feature_length
SCREAMING_SNAKE_CASE : Dict = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
SCREAMING_SNAKE_CASE : str = num_codevectors_per_group
SCREAMING_SNAKE_CASE : Union[str, Any] = num_codevector_groups
SCREAMING_SNAKE_CASE : int = contrastive_logits_temperature
SCREAMING_SNAKE_CASE : List[Any] = feat_quantizer_dropout
SCREAMING_SNAKE_CASE : Dict = num_negatives
SCREAMING_SNAKE_CASE : Dict = codevector_dim
SCREAMING_SNAKE_CASE : List[Any] = proj_codevector_dim
SCREAMING_SNAKE_CASE : Optional[Any] = diversity_loss_weight
# ctc loss
SCREAMING_SNAKE_CASE : str = ctc_loss_reduction
SCREAMING_SNAKE_CASE : str = ctc_zero_infinity
# adapter
SCREAMING_SNAKE_CASE : str = add_adapter
SCREAMING_SNAKE_CASE : Optional[Any] = adapter_kernel_size
SCREAMING_SNAKE_CASE : List[str] = adapter_stride
SCREAMING_SNAKE_CASE : Optional[Any] = num_adapter_layers
SCREAMING_SNAKE_CASE : List[str] = output_hidden_size or hidden_size
SCREAMING_SNAKE_CASE : int = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
SCREAMING_SNAKE_CASE : Optional[int] = list(lowercase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = list(lowercase__ )
SCREAMING_SNAKE_CASE : str = list(lowercase__ )
SCREAMING_SNAKE_CASE : List[Any] = xvector_output_dim
@property
    def inputs_to_logits_ratio(self) -> int:
return functools.reduce(operator.mul , self.conv_stride , 1 )
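# With the conv_stride default (5, 2, 2, 2, 2, 2, 2) above, the feature encoder
# downsamples by 5 * 2**6 = 320, i.e. one logit frame per 320 input samples
# (20 ms of audio at 16 kHz). A quick check, assuming the upstream defaults:
#
#     >>> Wav2Vec2Config().inputs_to_logits_ratio
#     320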
| 251
|
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class UpperCAmelCase(DiffusionPipeline):
    """Stable Diffusion pipeline that can re-use the latents of a reference resolution, so the same seed produces similar images at different sizes."""
def __init__( self , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , ) -> Union[str, Any]:
super().__init__()
self.register_modules(
vae=lowercase__ , text_encoder=lowercase__ , tokenizer=lowercase__ , unet=lowercase__ , scheduler=lowercase__ , safety_checker=lowercase__ , feature_extractor=lowercase__ , )
def _UpperCamelCase ( self , lowercase__ = "auto" ) -> Optional[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE : List[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase__ )
def _UpperCamelCase ( self ) -> str:
self.enable_attention_slicing(lowercase__ )
@torch.no_grad()
def __call__( self , lowercase__ , lowercase__ = 512 , lowercase__ = 512 , lowercase__ = 50 , lowercase__ = 7.5 , lowercase__ = None , lowercase__ = 1 , lowercase__ = 0.0 , lowercase__ = None , lowercase__ = None , lowercase__ = "pil" , lowercase__ = True , lowercase__ = None , lowercase__ = 1 , lowercase__ = None , **lowercase__ , ) -> Union[str, Any]:
if isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE : Dict = 1
elif isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE : int = len(lowercase__ )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(lowercase__ )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase__ , lowercase__ ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(lowercase__ )}.""" )
# get prompt text embeddings
SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer(
lowercase__ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
SCREAMING_SNAKE_CASE : Optional[int] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE : Dict = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
SCREAMING_SNAKE_CASE : Optional[int] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
            bs_embed, seq_len, _ = text_embeddings.shape
SCREAMING_SNAKE_CASE : List[Any] = text_embeddings.repeat(1 , lowercase__ , 1 )
SCREAMING_SNAKE_CASE : str = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
SCREAMING_SNAKE_CASE : int = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE : List[str]
if negative_prompt is None:
SCREAMING_SNAKE_CASE : str = ['']
elif type(lowercase__ ) is not type(lowercase__ ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(lowercase__ )} !="""
F""" {type(lowercase__ )}.""" )
elif isinstance(lowercase__ , lowercase__ ):
SCREAMING_SNAKE_CASE : int = [negative_prompt]
elif batch_size != len(lowercase__ ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(lowercase__ )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
' the batch size of `prompt`.' )
else:
SCREAMING_SNAKE_CASE : int = negative_prompt
SCREAMING_SNAKE_CASE : int = text_input_ids.shape[-1]
SCREAMING_SNAKE_CASE : str = self.tokenizer(
lowercase__ , padding='max_length' , max_length=lowercase__ , truncation=lowercase__ , return_tensors='pt' , )
SCREAMING_SNAKE_CASE : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE : Optional[Any] = uncond_embeddings.shape[1]
SCREAMING_SNAKE_CASE : Optional[Any] = uncond_embeddings.repeat(lowercase__ , lowercase__ , 1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE : List[Any] = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
SCREAMING_SNAKE_CASE : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
SCREAMING_SNAKE_CASE : Dict = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
SCREAMING_SNAKE_CASE : Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
SCREAMING_SNAKE_CASE : Optional[Any] = torch.randn(
lowercase__ , generator=lowercase__ , device='cpu' , dtype=lowercase__ ).to(self.device )
SCREAMING_SNAKE_CASE : Dict = torch.randn(lowercase__ , generator=lowercase__ , device='cpu' , dtype=lowercase__ ).to(
self.device )
else:
SCREAMING_SNAKE_CASE : Optional[int] = torch.randn(
lowercase__ , generator=lowercase__ , device=self.device , dtype=lowercase__ )
SCREAMING_SNAKE_CASE : int = torch.randn(lowercase__ , generator=lowercase__ , device=self.device , dtype=lowercase__ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
SCREAMING_SNAKE_CASE : Optional[Any] = latents_reference.to(self.device )
SCREAMING_SNAKE_CASE : Union[str, Any] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
SCREAMING_SNAKE_CASE : List[Any] = (latents_shape[3] - latents_shape_reference[3]) // 2
SCREAMING_SNAKE_CASE : int = (latents_shape[2] - latents_shape_reference[2]) // 2
SCREAMING_SNAKE_CASE : Tuple = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
SCREAMING_SNAKE_CASE : Any = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
SCREAMING_SNAKE_CASE : Optional[Any] = 0 if dx < 0 else dx
SCREAMING_SNAKE_CASE : Union[str, Any] = 0 if dy < 0 else dy
SCREAMING_SNAKE_CASE : List[str] = max(-dx , 0 )
SCREAMING_SNAKE_CASE : Optional[int] = max(-dy , 0 )
# import pdb
# pdb.set_trace()
SCREAMING_SNAKE_CASE : Optional[int] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(lowercase__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
SCREAMING_SNAKE_CASE : Optional[Any] = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
SCREAMING_SNAKE_CASE : Union[str, Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
SCREAMING_SNAKE_CASE : int = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
SCREAMING_SNAKE_CASE : str = {}
if accepts_eta:
SCREAMING_SNAKE_CASE : Dict = eta
for i, t in enumerate(self.progress_bar(lowercase__ ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.scale_model_input(lowercase__ , lowercase__ )
# predict the noise residual
SCREAMING_SNAKE_CASE : int = self.unet(lowercase__ , lowercase__ , encoder_hidden_states=lowercase__ ).sample
# perform guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE : Union[str, Any] = self.scheduler.step(lowercase__ , lowercase__ , lowercase__ , **lowercase__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase__ , lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE : List[Any] = 1 / 0.1_8_2_1_5 * latents
SCREAMING_SNAKE_CASE : Tuple = self.vae.decode(lowercase__ ).sample
SCREAMING_SNAKE_CASE : int = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
SCREAMING_SNAKE_CASE : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
SCREAMING_SNAKE_CASE : List[str] = self.feature_extractor(self.numpy_to_pil(lowercase__ ) , return_tensors='pt' ).to(
self.device )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.safety_checker(
images=lowercase__ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
SCREAMING_SNAKE_CASE : str = None
if output_type == "pil":
SCREAMING_SNAKE_CASE : Optional[int] = self.numpy_to_pil(lowercase__ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=lowercase__ , nsfw_content_detected=lowercase__ )
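# A minimal numeric sketch (illustrative only, not from the pipeline above) of the
# classifier-free guidance update used in the denoising loop: the batched UNet
# output is split into unconditional and text-conditioned halves and recombined
# with the guidance weight.
import torch

guidance_scale = 7.5
noise_pred = torch.tensor([[0.1, 0.2], [0.5, 0.8]])  # made-up values
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(guided)  # tensor([[3.1000, 4.7000]])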
| 251
| 1
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int) -> bool:
    """Return True if `number` is a perfect square."""
    sq = int(number**0.5)
    return number == sq * sq


def add_three(
    x_num: int, x_den: int, y_num: int, y_den: int, z_num: int, z_den: int
) -> tuple[int, int]:
    """Add three fractions and return the reduced (numerator, denominator)."""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top, bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution(order: int = 35) -> int:
    """Collect all unique reduced sums of fraction triples up to `order`."""
    unique_s: set[tuple[int, int]] = set()
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]

    for x_num in range(1, order + 1):
        for x_den in range(x_num + 1, order + 1):
            for y_num in range(1, order + 1):
                for y_den in range(y_num + 1, order + 1):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=2
                    z_num = x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num, z_den)
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num, z_den)
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(x_num, x_den, y_num, y_den, z_num, z_den)
                            unique_s.add(fraction_sum)

    for num, den in unique_s:
        total += Fraction(num, den)

    return total.denominator + total.numerator


if __name__ == "__main__":
    print(f"{solution() = }")
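# Quick sanity check of the reduction helper against fractions.Fraction;
# `add_three` is the reconstructed name of the three-fraction adder above.
_top, _bottom = add_three(1, 2, 1, 3, 1, 6)  # 1/2 + 1/3 + 1/6 = 1/1
assert (_top, _bottom) == (1, 1)
assert Fraction(1, 2) + Fraction(1, 3) + Fraction(1, 6) == Fraction(_top, _bottom)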
| 702
|
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a snippet's token set (None if too few tokens)."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on non-alphanumeric boundaries."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Insert a key into the LSH index and route it into a duplicate cluster."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10_000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator, jaccard_threshold: float):
    """Find duplicate clusters in two steps: MinHash sketches, then LSH lookup."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Exact token-set Jaccard similarity between two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Keep one representative ("extreme") per group of near-identical files."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Dataset, jaccard_threshold: float = 0.85):
    """Remove near-duplicates from `dataset`, keeping one extreme per cluster."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
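# A small self-contained sketch of the MinHash estimate that drives the
# deduplication above; it uses only the public `datasketch` API
# (MinHash.update / MinHash.jaccard) and whitespace tokenization for brevity.
from datasketch import MinHash


def _sketch(text: str) -> MinHash:
    m = MinHash(num_perm=256)
    for token in set(text.split()):
        m.update(token.encode())
    return m


_a = _sketch("def add(a, b): return a + b")
_b = _sketch("def add(x, y): return x + y")
print(_a.jaccard(_b))  # approximates the true token-set Jaccard similarity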
| 314
| 0
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
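# Hypothetical downstream usage once torch and transformers are installed
# (kept as comments since this is an __init__ module). "thu-ml/unidiffuser-v1"
# is the commonly used public checkpoint; the mode-setting helpers are part of
# UniDiffuserPipeline's interface, but treat this as a sketch:
#
#   from diffusers import UniDiffuserPipeline
#
#   pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1")
#   pipe.set_joint_mode()  # sample an (image, text) pair jointly
#   sample = pipe(num_inference_steps=20, guidance_scale=8.0)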
| 0
|
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self):
        self.head = None

    def print_list(self):
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes at the head
    def push(self, new_data: Any):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping the payloads of two nodes found by value
    def swap_nodes(self, node_data_1: Any, node_data_2: Any):
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
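# A sketch (not in the original) of swapping by relinking the nodes instead of
# exchanging their payloads; useful when nodes are large or referenced
# elsewhere. Assumes the LinkedList/Node classes defined above.
def swap_nodes_by_relinking(ll: LinkedList, a: Any, b: Any) -> None:
    if a == b:
        return
    prev_a, node_a = None, ll.head
    while node_a is not None and node_a.data != a:
        prev_a, node_a = node_a, node_a.next
    prev_b, node_b = None, ll.head
    while node_b is not None and node_b.data != b:
        prev_b, node_b = node_b, node_b.next
    if node_a is None or node_b is None:
        return
    # repoint the predecessors (or the head) at the other node
    if prev_a is not None:
        prev_a.next = node_b
    else:
        ll.head = node_b
    if prev_b is not None:
        prev_b.next = node_a
    else:
        ll.head = node_a
    # finally exchange the successors; this also handles adjacent nodes
    node_a.next, node_b.next = node_b.next, node_a.next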
| 330
| 0
|
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
lowercase__ = TypeVar('T')
def get_parent_position(position: int) -> int:
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
def __init__( self) -> None:
'''simple docstring'''
a__: list[tuple[T, int]] = []
a__: dict[T, int] = {}
a__: int = 0
def __len__( self) -> int:
'''simple docstring'''
return self.elements
def __repr__( self) -> str:
'''simple docstring'''
return str(self.heap)
def lowerCamelCase_ ( self) -> bool:
'''simple docstring'''
return self.elements == 0
def lowerCamelCase_ ( self , lowercase , lowercase) -> None:
'''simple docstring'''
self.heap.append((elem, weight))
a__: Union[str, Any] = self.elements
self.elements += 1
self._bubble_up(lowercase)
def lowerCamelCase_ ( self) -> T:
'''simple docstring'''
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1)
a__ , a__: Any = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
a__ , a__: Any = self.heap[0]
self._bubble_down(lowercase)
return elem
def lowerCamelCase_ ( self , lowercase , lowercase) -> None:
'''simple docstring'''
a__: List[Any] = self.position_map[elem]
a__: Optional[int] = (elem, weight)
if position > 0:
a__: Tuple = get_parent_position(lowercase)
a__ , a__: Any = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(lowercase)
else:
self._bubble_down(lowercase)
else:
self._bubble_down(lowercase)
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
a__: Any = self.position_map[elem]
if curr_pos == 0:
return None
a__: List[Any] = get_parent_position(lowercase)
a__ , a__: Optional[int] = self.heap[curr_pos]
a__ , a__: int = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(lowercase , lowercase)
return self._bubble_up(lowercase)
return None
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
a__: Optional[Any] = self.position_map[elem]
a__ , a__: int = self.heap[curr_pos]
a__: int = get_child_left_position(lowercase)
a__: Union[str, Any] = get_child_right_position(lowercase)
if child_left_position < self.elements and child_right_position < self.elements:
a__ , a__: Any = self.heap[child_left_position]
a__ , a__: Any = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(lowercase , lowercase)
return self._bubble_down(lowercase)
if child_left_position < self.elements:
a__ , a__: Tuple = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(lowercase , lowercase)
return self._bubble_down(lowercase)
else:
return None
if child_right_position < self.elements:
a__ , a__: Any = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(lowercase , lowercase)
return self._bubble_down(lowercase)
return None
def lowerCamelCase_ ( self , lowercase , lowercase) -> None:
'''simple docstring'''
a__: Tuple = self.heap[nodea_pos][0]
a__: Any = self.heap[nodea_pos][0]
a__ , a__: List[str] = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
a__: Optional[int] = nodea_pos
a__: List[Any] = nodea_pos
class GraphUndirectedWeighted(Generic[T]):
def __init__( self) -> None:
'''simple docstring'''
a__: dict[T, dict[T, int]] = {}
a__: int = 0
def __repr__( self) -> str:
'''simple docstring'''
return str(self.connections)
def __len__( self) -> int:
'''simple docstring'''
return self.nodes
def lowerCamelCase_ ( self , lowercase) -> None:
'''simple docstring'''
if node not in self.connections:
a__: List[str] = {}
self.nodes += 1
def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> None:
'''simple docstring'''
self.add_node(lowercase)
self.add_node(lowercase)
a__: List[Any] = weight
a__: int = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    """Run Prim's algorithm; returns the distance and parent maps of the tree."""
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
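# Hypothetical usage of the structures above (kept as comments because the
# queue and graph method names inside the mangled class bodies are assumed to
# be the conventional ones: add_edge, push, extract_min, update_key, ...):
#
#   g: GraphUndirectedWeighted[str] = GraphUndirectedWeighted()
#   g.add_edge("a", "b", 3)
#   g.add_edge("b", "c", 10)
#   g.add_edge("a", "c", 15)
#   dist, parent = prims_algo(g)
#   print(parent)  # e.g. {'a': None, 'b': 'a', 'c': 'b'}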
| 217
|
"""simple docstring"""
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")


if __name__ == "__main__":
    __import__("doctest").testmod()
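# Example usage of the reconstructed converters above:
if __name__ == "__main__":
    print(to_pascal_case("hello world"))        # HelloWorld
    print(to_snake_case("hello world", True))   # HELLO_WORLD
    print(to_kebab_case("hello world", False))  # hello-world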
| 217
| 1
|
def stooge_sort(arr: list) -> list:
    """Sort `arr` in place with stooge sort and return it."""
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)
        # Recursively sort last 2/3 elements
        stooge(arr, i + t, h)
        # Recursively sort first 2/3 elements
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
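# Stooge sort recurses on the first two-thirds, the last two-thirds, then the
# first two-thirds again: T(n) = 3*T(2n/3) + O(1), roughly O(n^2.71), so it is
# a teaching example rather than a practical sort. Quick checks:
assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]
assert stooge_sort([]) == []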
| 246
|
from __future__ import annotations
graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph implemented as a dictionary of adjacency lists."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self) -> None:
        """Map every reachable vertex to its parent in the BFS tree."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the path from the source to `target_vertex` as `A->B->...`."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breath_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
| 1
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class __lowerCAmelCase ( __lowerCamelCase ):
"""simple docstring"""
snake_case = 42
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : Any , _snake_case : Union[str, Any]=3 , _snake_case : Dict=3 , _snake_case : Union[str, Any]=("DownEncoderBlock2D",) , _snake_case : int=(64,) , _snake_case : int=2 , _snake_case : Any=32 , _snake_case : Any="silu" , _snake_case : Any=True , ) -> Any:
"""simple docstring"""
super().__init__()
A_ = layers_per_block
A_ = torch.nn.Convad(
UpperCAmelCase_ , block_out_channels[0] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
# down
A_ = block_out_channels[0]
for i, down_block_type in enumerate(UpperCAmelCase_ ):
A_ = output_channel
A_ = block_out_channels[i]
A_ = i == len(UpperCAmelCase_ ) - 1
A_ = get_down_block(
UpperCAmelCase_ , num_layers=self.layers_per_block , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , add_downsample=not is_final_block , resnet_eps=1e-6 , downsample_padding=0 , resnet_act_fn=UpperCAmelCase_ , resnet_groups=UpperCAmelCase_ , attention_head_dim=UpperCAmelCase_ , temb_channels=UpperCAmelCase_ , )
self.down_blocks.append(UpperCAmelCase_ )
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift="default" , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCAmelCase_ , temb_channels=UpperCAmelCase_ , )
# out
A_ = nn.GroupNorm(num_channels=block_out_channels[-1] , num_groups=UpperCAmelCase_ , eps=1e-6 )
A_ = nn.SiLU()
A_ = 2 * out_channels if double_z else out_channels
A_ = nn.Convad(block_out_channels[-1] , UpperCAmelCase_ , 3 , padding=1 )
A_ = False
def lowerCamelCase__ ( self : Any , _snake_case : List[str] ) -> Union[str, Any]:
"""simple docstring"""
A_ = x
A_ = self.conv_in(UpperCAmelCase_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(_snake_case : Tuple ):
def custom_forward(*_snake_case : Optional[Any] ):
return module(*UpperCAmelCase_ )
return custom_forward
# down
if is_torch_version(">=" , "1.11.0" ):
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCAmelCase_ ) , UpperCAmelCase_ , use_reentrant=UpperCAmelCase_ )
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCAmelCase_ , use_reentrant=UpperCAmelCase_ )
else:
for down_block in self.down_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCAmelCase_ ) , UpperCAmelCase_ )
# middle
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , UpperCAmelCase_ )
else:
# down
for down_block in self.down_blocks:
A_ = down_block(UpperCAmelCase_ )
# middle
A_ = self.mid_block(UpperCAmelCase_ )
# post-process
A_ = self.conv_norm_out(UpperCAmelCase_ )
A_ = self.conv_act(UpperCAmelCase_ )
A_ = self.conv_out(UpperCAmelCase_ )
return sample
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] , _snake_case : str=3 , _snake_case : Tuple=3 , _snake_case : Union[str, Any]=("UpDecoderBlock2D",) , _snake_case : str=(64,) , _snake_case : Union[str, Any]=2 , _snake_case : Union[str, Any]=32 , _snake_case : Optional[Any]="silu" , _snake_case : Any="group" , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__()
A_ = layers_per_block
A_ = nn.Convad(
UpperCAmelCase_ , block_out_channels[-1] , kernel_size=3 , stride=1 , padding=1 , )
A_ = None
A_ = nn.ModuleList([] )
A_ = in_channels if norm_type == 'spatial' else None
# mid
A_ = UNetMidBlockaD(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=UpperCAmelCase_ , output_scale_factor=1 , resnet_time_scale_shift="default" if norm_type == "group" else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=UpperCAmelCase_ , temb_channels=UpperCAmelCase_ , )
# up
A_ = list(reversed(UpperCAmelCase_ ) )
A_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(UpperCAmelCase_ ):
A_ = output_channel
A_ = reversed_block_out_channels[i]
A_ = i == len(UpperCAmelCase_ ) - 1
A_ = get_up_block(
UpperCAmelCase_ , num_layers=self.layers_per_block + 1 , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , prev_output_channel=UpperCAmelCase_ , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=UpperCAmelCase_ , resnet_groups=UpperCAmelCase_ , attention_head_dim=UpperCAmelCase_ , temb_channels=UpperCAmelCase_ , resnet_time_scale_shift=UpperCAmelCase_ , )
self.up_blocks.append(UpperCAmelCase_ )
A_ = output_channel
# out
if norm_type == "spatial":
A_ = SpatialNorm(block_out_channels[0] , UpperCAmelCase_ )
else:
A_ = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=UpperCAmelCase_ , eps=1e-6 )
A_ = nn.SiLU()
A_ = nn.Convad(block_out_channels[0] , UpperCAmelCase_ , 3 , padding=1 )
A_ = False
def lowerCamelCase__ ( self : Tuple , _snake_case : str , _snake_case : str=None ) -> int:
"""simple docstring"""
A_ = z
A_ = self.conv_in(UpperCAmelCase_ )
A_ = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(_snake_case : Optional[int] ):
def custom_forward(*_snake_case : Tuple ):
return module(*UpperCAmelCase_ )
return custom_forward
if is_torch_version(">=" , "1.11.0" ):
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCAmelCase_ , UpperCAmelCase_ , use_reentrant=UpperCAmelCase_ )
A_ = sample.to(UpperCAmelCase_ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ , use_reentrant=UpperCAmelCase_ )
else:
# middle
A_ = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , UpperCAmelCase_ , UpperCAmelCase_ )
A_ = sample.to(UpperCAmelCase_ )
# up
for up_block in self.up_blocks:
A_ = torch.utils.checkpoint.checkpoint(create_custom_forward(UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ )
else:
# middle
A_ = self.mid_block(UpperCAmelCase_ , UpperCAmelCase_ )
A_ = sample.to(UpperCAmelCase_ )
# up
for up_block in self.up_blocks:
A_ = up_block(UpperCAmelCase_ , UpperCAmelCase_ )
# post-process
if latent_embeds is None:
A_ = self.conv_norm_out(UpperCAmelCase_ )
else:
A_ = self.conv_norm_out(UpperCAmelCase_ , UpperCAmelCase_ )
A_ = self.conv_act(UpperCAmelCase_ )
A_ = self.conv_out(UpperCAmelCase_ )
return sample
class __lowerCAmelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , _snake_case : Tuple , _snake_case : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Tuple=None , _snake_case : List[Any]="random" , _snake_case : Tuple=False , _snake_case : Optional[Any]=True ) -> Optional[int]:
"""simple docstring"""
super().__init__()
A_ = n_e
A_ = vq_embed_dim
A_ = beta
A_ = legacy
A_ = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
A_ = remap
if self.remap is not None:
self.register_buffer("used" , torch.tensor(np.load(self.remap ) ) )
A_ = self.used.shape[0]
A_ = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A_ = self.re_embed
A_ = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
A_ = n_e
A_ = sane_index_shape
def lowerCamelCase__ ( self : int , _snake_case : Optional[int] ) -> Dict:
"""simple docstring"""
A_ = inds.shape
assert len(UpperCAmelCase_ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCAmelCase_ )
A_ = (inds[:, :, None] == used[None, None, ...]).long()
A_ = match.argmax(-1 )
A_ = match.sum(2 ) < 1
if self.unknown_index == "random":
A_ = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
A_ = self.unknown_index
return new.reshape(UpperCAmelCase_ )
def lowerCamelCase__ ( self : int , _snake_case : Dict ) -> Any:
"""simple docstring"""
A_ = inds.shape
assert len(UpperCAmelCase_ ) > 1
A_ = inds.reshape(ishape[0] , -1 )
A_ = self.used.to(UpperCAmelCase_ )
if self.re_embed > self.used.shape[0]: # extra token
A_ = 0 # simply set to zero
A_ = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , UpperCAmelCase_ )
return back.reshape(UpperCAmelCase_ )
def lowerCamelCase__ ( self : Dict , _snake_case : Optional[int] ) -> Tuple:
"""simple docstring"""
# reshape z -> (batch, height, width, channel) and flatten
A_ = z.permute(0 , 2 , 3 , 1 ).contiguous()
A_ = z.view(-1 , self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A_ = torch.argmin(torch.cdist(UpperCAmelCase_ , self.embedding.weight ) , dim=1 )
A_ = self.embedding(UpperCAmelCase_ ).view(z.shape )
A_ = None
A_ = None
# compute loss for embedding
if not self.legacy:
A_ = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A_ = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A_ = z + (z_q - z).detach()
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
if self.remap is not None:
A_ = min_encoding_indices.reshape(z.shape[0] , -1 ) # add batch axis
A_ = self.remap_to_used(UpperCAmelCase_ )
A_ = min_encoding_indices.reshape(-1 , 1 ) # flatten
if self.sane_index_shape:
A_ = min_encoding_indices.reshape(z_q.shape[0] , z_q.shape[2] , z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def lowerCamelCase__ ( self : int , _snake_case : List[Any] , _snake_case : Any ) -> Tuple:
"""simple docstring"""
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A_ = indices.reshape(shape[0] , -1 ) # add batch axis
A_ = self.unmap_to_all(UpperCAmelCase_ )
A_ = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A_ = self.embedding(UpperCAmelCase_ )
if shape is not None:
A_ = z_q.view(UpperCAmelCase_ )
# reshape back to match original input shape
A_ = z_q.permute(0 , 3 , 1 , 2 ).contiguous()
return z_q
class __lowerCAmelCase ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self : List[str] , _snake_case : List[Any] , _snake_case : Tuple=False ) -> Optional[Any]:
"""simple docstring"""
A_ = parameters
A_ = torch.chunk(UpperCAmelCase_ , 2 , dim=1 )
A_ = torch.clamp(self.logvar , -3_0.0 , 2_0.0 )
A_ = deterministic
A_ = torch.exp(0.5 * self.logvar )
A_ = torch.exp(self.logvar )
if self.deterministic:
A_ = torch.zeros_like(
self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
def lowerCamelCase__ ( self : Union[str, Any] , _snake_case : Optional[torch.Generator] = None ) -> Any:
"""simple docstring"""
# make sure sample is on the same device as the parameters and has same dtype
A_ = randn_tensor(
self.mean.shape , generator=UpperCAmelCase_ , device=self.parameters.device , dtype=self.parameters.dtype )
A_ = self.mean + self.std * sample
return x
def lowerCamelCase__ ( self : Tuple , _snake_case : Dict=None ) -> int:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean , 2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar , dim=[1, 2, 3] , )
def lowerCamelCase__ ( self : Dict , _snake_case : Union[str, Any] , _snake_case : Optional[int]=[1, 2, 3] ) -> Optional[int]:
"""simple docstring"""
if self.deterministic:
return torch.Tensor([0.0] )
A_ = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=UpperCAmelCase_ )
def lowerCamelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
return self.mean
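# A short sketch of the final class above, whose structure matches diffusers'
# DiagonalGaussianDistribution: `parameters` is split channel-wise into a mean
# and a log-variance, and sampling uses the reparameterisation trick. The class
# name is assumed (the listing's names are mangled), so this stays a comment:
#
#   import torch
#   params = torch.randn(1, 8, 4, 4)   # 4 mean channels + 4 logvar channels
#   dist = DiagonalGaussianDistribution(params)
#   z = dist.sample()                  # mean + std * noise
#   print(z.shape, dist.kl().shape)    # torch.Size([1, 4, 4, 4]) torch.Size([1])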
| 706
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 482
| 0
|
'''simple docstring'''
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
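# Example: the returned module is a standard torch.nn activation and can be
# applied directly to tensors (`get_activation` is the reconstructed name above).
if __name__ == "__main__":
    import torch

    act = get_activation("silu")
    print(act(torch.tensor([-1.0, 0.0, 1.0])))  # tensor([-0.2689, 0.0000, 0.7311])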
| 508
|
'''simple docstring'''
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1_000) -> int:
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(input().strip())))
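# F(12) = 144 is the first Fibonacci number with three digits, so the
# reconstructed helpers above should agree:
assert fibonacci(12) == 144
assert fibonacci_digits_index(3) == 12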
| 508
| 1
|
import argparse
import torch
from safetensors.torch import load_file
from diffusers import StableDiffusionPipeline
def convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha):
    # load base model
    pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)

    # load LoRA weight from .safetensors
    state_dict = load_file(checkpoint_path)

    visited = []

    # directly update weight in diffusers model
    for key in state_dict:
        # it is suggested to print out the key, it usually will be something like below
        # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight"

        # as we have set the alpha beforehand, so just skip
        if ".alpha" in key or key in visited:
            continue

        if "text" in key:
            layer_infos = key.split(".")[0].split(lora_prefix_text_encoder + "_")[-1].split("_")
            curr_layer = pipeline.text_encoder
        else:
            layer_infos = key.split(".")[0].split(lora_prefix_unet + "_")[-1].split("_")
            curr_layer = pipeline.unet

        # find the target layer
        temp_name = layer_infos.pop(0)
        while len(layer_infos) > -1:
            try:
                curr_layer = curr_layer.__getattr__(temp_name)
                if len(layer_infos) > 0:
                    temp_name = layer_infos.pop(0)
                elif len(layer_infos) == 0:
                    break
            except Exception:
                if len(temp_name) > 0:
                    temp_name += "_" + layer_infos.pop(0)
                else:
                    temp_name = layer_infos.pop(0)

        pair_keys = []
        if "lora_down" in key:
            pair_keys.append(key.replace("lora_down", "lora_up"))
            pair_keys.append(key)
        else:
            pair_keys.append(key)
            pair_keys.append(key.replace("lora_up", "lora_down"))

        # update weight
        if len(state_dict[pair_keys[0]].shape) == 4:
            weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)
            weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3)
        else:
            weight_up = state_dict[pair_keys[0]].to(torch.float32)
            weight_down = state_dict[pair_keys[1]].to(torch.float32)
            curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down)

        # update visited list
        for item in pair_keys:
            visited.append(item)

    return pipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format."""
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors"""
)
parser.add_argument(
"""--lora_prefix_text_encoder""",
default="""lora_te""",
type=str,
help="""The prefix of text encoder weight in safetensors""",
)
parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""")
parser.add_argument(
"""--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not."""
)
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
    args = parser.parse_args()

    base_model_path = args.base_model_path
    checkpoint_path = args.checkpoint_path
    dump_path = args.dump_path
    lora_prefix_unet = args.lora_prefix_unet
    lora_prefix_text_encoder = args.lora_prefix_text_encoder
    alpha = args.alpha

    pipe = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha)
    pipe = pipe.to(args.device)
    pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
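# The core of the merge above is the low-rank update W <- W + alpha * (up @ down).
# A tiny self-contained illustration (shapes chosen arbitrarily; kept as
# comments so it does not run as part of the script):
#
#   import torch
#
#   n_out, n_in, rank, alpha = 8, 8, 4, 0.75
#   weight = torch.zeros(n_out, n_in)
#   lora_up = torch.randn(n_out, rank)
#   lora_down = torch.randn(rank, n_in)
#   weight += alpha * torch.mm(lora_up, lora_down)
#   print(torch.linalg.matrix_rank(weight))  # at most `rank`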
| 708
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_poolformer import (
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
PoolFormerConfig,
PoolFormerOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_poolformer import PoolFormerFeatureExtractor
from .image_processing_poolformer import PoolFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_poolformer import (
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
PoolFormerForImageClassification,
PoolFormerModel,
PoolFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 440
| 0
|
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Return the rightmost index of `char` in the pattern, or -1."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
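# Another run of the searcher above on a longer text; the returned positions
# are the alignments where the full pattern matches.
bms2 = BoyerMooreSearch("ABAABAACD", "ABA")
print(bms2.bad_character_heuristic())  # [0, 3]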
| 52
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self : Optional[Any] ):
_UpperCAmelCase : Tuple = tempfile.mkdtemp()
# fmt: off
_UpperCAmelCase : List[str] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
_UpperCAmelCase : Tuple = dict(zip(A , range(len(A ) ) ) )
_UpperCAmelCase : Any = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
_UpperCAmelCase : Optional[Any] = {"unk_token": "<unk>"}
_UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(A ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(A ) )
_UpperCAmelCase : List[Any] = {
"do_resize": True,
"size": 2_0,
"do_center_crop": True,
"crop_size": 1_8,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
_UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(A , A )
def snake_case_ ( self : int , **A : Any ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **A )
def snake_case_ ( self : List[str] , **A : Tuple ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A )
def snake_case_ ( self : Any , **A : Optional[Any] ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **A )
def snake_case_ ( self : Dict ):
shutil.rmtree(self.tmpdirname )
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : int = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
_UpperCAmelCase : str = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : str = self.get_tokenizer()
_UpperCAmelCase : int = self.get_rust_tokenizer()
_UpperCAmelCase : List[str] = self.get_image_processor()
_UpperCAmelCase : Union[str, Any] = CLIPSegProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Dict = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=A )
_UpperCAmelCase : Any = CLIPSegProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
_UpperCAmelCase : int = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def snake_case_ ( self : Optional[Any] ):
_UpperCAmelCase : Union[str, Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : List[str] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_UpperCAmelCase : int = self.get_image_processor(do_normalize=A , padding_value=1.0 )
_UpperCAmelCase : Optional[Any] = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def snake_case_ ( self : str ):
_UpperCAmelCase : Any = self.get_image_processor()
_UpperCAmelCase : Optional[int] = self.get_tokenizer()
_UpperCAmelCase : Dict = CLIPSegProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Dict = self.prepare_image_inputs()
_UpperCAmelCase : int = image_processor(A , return_tensors="np" )
_UpperCAmelCase : int = processor(images=A , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def snake_case_ ( self : Dict ):
_UpperCAmelCase : List[Any] = self.get_image_processor()
_UpperCAmelCase : Dict = self.get_tokenizer()
_UpperCAmelCase : Optional[Any] = CLIPSegProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : Tuple = "lower newer"
_UpperCAmelCase : Any = processor(text=A )
_UpperCAmelCase : int = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case_ ( self : Optional[Any] ):
_UpperCAmelCase : List[str] = self.get_image_processor()
_UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
_UpperCAmelCase : Tuple = CLIPSegProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : List[Any] = "lower newer"
_UpperCAmelCase : List[Any] = self.prepare_image_inputs()
_UpperCAmelCase : str = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def snake_case_ ( self : Optional[Any] ):
_UpperCAmelCase : Dict = self.get_image_processor()
_UpperCAmelCase : Optional[int] = self.get_tokenizer()
_UpperCAmelCase : str = CLIPSegProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : List[Any] = self.prepare_image_inputs()
_UpperCAmelCase : int = self.prepare_image_inputs()
_UpperCAmelCase : Any = processor(images=A , visual_prompt=A )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def snake_case_ ( self : Optional[Any] ):
_UpperCAmelCase : Dict = self.get_image_processor()
_UpperCAmelCase : Optional[Any] = self.get_tokenizer()
_UpperCAmelCase : Optional[Any] = CLIPSegProcessor(tokenizer=A , image_processor=A )
_UpperCAmelCase : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase : Union[str, Any] = processor.batch_decode(A )
_UpperCAmelCase : List[Any] = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
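# Hypothetical end-to-end usage of the processor exercised by these tests, with
# the public CIDAS checkpoint (names taken from the transformers docs; kept as
# comments since this file is a unittest module):
#
#   from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#   inputs = processor(text=["a cat", "a remote"], images=[image] * 2, return_tensors="pt")
#   outputs = model(**inputs)  # one segmentation logit map per prompt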
| 289
| 0
|
"""simple docstring"""
import json
import os
import re
import sys
import urllib.request
import requests
from bsa import BeautifulSoup
SCREAMING_SNAKE_CASE__ : List[str] ={
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582'
}
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ = "dhaka" , SCREAMING_SNAKE_CASE_ = 5 ) ->int:
_lowerCamelCase : str = min(SCREAMING_SNAKE_CASE_ , 50 ) # Prevent abuse!
_lowerCamelCase : Union[str, Any] = {
'''q''': query,
'''tbm''': '''isch''',
'''hl''': '''en''',
'''ijn''': '''0''',
}
_lowerCamelCase : Optional[Any] = requests.get('''https://www.google.com/search''' , params=SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : int = BeautifulSoup(html.text , '''html.parser''' )
_lowerCamelCase : Any = ''''''.join(
re.findall(R'''AF_initDataCallback\(([^<]+)\);''' , str(soup.select('''script''' ) ) ) )
_lowerCamelCase : List[Any] = json.dumps(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : Dict = json.loads(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : List[str] = re.findall(
R'''\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",''' , SCREAMING_SNAKE_CASE_ , )
if not matched_google_image_data:
return 0
_lowerCamelCase : Optional[int] = re.sub(
R'''\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]''' , '''''' , str(SCREAMING_SNAKE_CASE_ ) , )
_lowerCamelCase : Union[str, Any] = re.findall(
R'''(?:\'|,),\[\"(https:|http.*?)\",\d+,\d+\]''' , SCREAMING_SNAKE_CASE_ , )
for index, fixed_full_res_image in enumerate(SCREAMING_SNAKE_CASE_ ):
if index >= max_images:
return index
_lowerCamelCase : int = bytes(SCREAMING_SNAKE_CASE_ , '''ascii''' ).decode(
'''unicode-escape''' )
_lowerCamelCase : Optional[Any] = bytes(SCREAMING_SNAKE_CASE_ , '''ascii''' ).decode(
'''unicode-escape''' )
_lowerCamelCase : Optional[Any] = urllib.request.build_opener()
_lowerCamelCase : int = [
(
'''User-Agent''',
'''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'''
''' (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582''',
)
]
urllib.request.install_opener(SCREAMING_SNAKE_CASE_ )
_lowerCamelCase : List[Any] = F'''query_{query.replace(" " , "_" )}'''
if not os.path.exists(SCREAMING_SNAKE_CASE_ ):
os.makedirs(SCREAMING_SNAKE_CASE_ )
urllib.request.urlretrieve( # noqa: S310
SCREAMING_SNAKE_CASE_ , F'''{path_name}/original_size_img_{index}.jpg''' )
return index
if __name__ == "__main__":
try:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =download_images_from_google_query(sys.argv[1])
print(F"""{image_count} images were downloaded to disk.""")
except IndexError:
print('Please provide a search term.')
raise
| 558
|
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _UpperCAmelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
__snake_case = CLIPTokenizer
__snake_case = CLIPTokenizerFast
__snake_case = True
__snake_case = {}
__snake_case = False
def a__ ( self ) -> Tuple:
super().setUp()
# fmt: off
_lowerCamelCase : List[str] = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
_lowerCamelCase : Any = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
_lowerCamelCase : Union[str, Any] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''']
_lowerCamelCase : List[Any] = {'''unk_token''': '''<unk>'''}
_lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowercase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_lowercase ) )
def a__ ( self , **_lowercase ) -> List[str]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def a__ ( self , **_lowercase ) -> Dict:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **_lowercase )
def a__ ( self , _lowercase ) -> Any:
_lowerCamelCase : Any = '''lower newer'''
_lowerCamelCase : Any = '''lower newer'''
return input_text, output_text
def a__ ( self ) -> Tuple:
_lowerCamelCase : Dict = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_lowerCamelCase : Union[str, Any] = '''lower newer'''
_lowerCamelCase : Optional[int] = ['''lo''', '''w''', '''er</w>''', '''n''', '''e''', '''w''', '''er</w>''']
_lowerCamelCase : str = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
_lowerCamelCase : Any = tokens + [tokenizer.unk_token]
_lowerCamelCase : Union[str, Any] = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
@require_ftfy
def a__ ( self ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowerCamelCase : List[Any] = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
_lowerCamelCase : Tuple = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
_lowerCamelCase : Any = '''A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'''
_lowerCamelCase : Tuple = tokenizer_s.tokenize(_lowercase )
_lowerCamelCase : Union[str, Any] = tokenizer_r.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_lowerCamelCase : Any = '''xa\u0303y''' + ''' ''' + '''x\xe3y'''
_lowerCamelCase : Tuple = tokenizer_s.tokenize(_lowercase )
_lowerCamelCase : Optional[Any] = tokenizer_r.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
# Test that the tokenization is identical on unicode of space type
_lowerCamelCase : Optional[int] = [
'''\u0009''', # (horizontal tab, '\t')
'''\u000B''', # (vertical tab)
'''\u000C''', # (form feed)
'''\u0020''', # (space, ' ')
'''\u200E''', # (left-to-right mark):w
'''\u200F''', # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
_lowerCamelCase : Optional[int] = tokenizer_s.tokenize(_lowercase )
_lowerCamelCase : Any = tokenizer_r.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
# Test that the tokenization is identical on unicode of line break type
_lowerCamelCase : Union[str, Any] = [
'''\u000A''', # (line feed, '\n')
'''\r\n''', # (carriage return and line feed, '\r\n')
'''\u000D''', # (carriage return, '\r')
'''\r''', # (carriage return, '\r')
'''\u000D''', # (carriage return, '\r')
'''\u2028''', # (line separator)
'''\u2029''', # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
_lowerCamelCase : Dict = tokenizer_s.tokenize(_lowercase )
_lowerCamelCase : str = tokenizer_r.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def a__ ( self ) -> str:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
_lowerCamelCase : int = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
_lowerCamelCase : List[Any] = F'''{text_of_1_token} {text_of_1_token}'''
_lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(
_lowercase , use_fast=_lowercase , )
_lowerCamelCase : List[str] = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowercase ) + 1, len(_lowercase ) + 1 + len(_lowercase )) , )
_lowerCamelCase : str = F''' {text}'''
_lowerCamelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
_lowercase , use_fast=_lowercase , )
_lowerCamelCase : List[Any] = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_lowercase ) + 1, 1 + len(_lowercase ) + 1 + len(_lowercase )) , )
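    # Sketch of the offsets checked above: for "hello hello" the two tokens
    # span (0, 5) and (6, 11); with a leading space the spans shift to (1, 6)
    # and (7, 12). The mapping indexes characters of the *original* string.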
def a__ ( self ) -> Optional[Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(_lowercase ) as context:
self.rust_tokenizer_class.from_pretrained('''robot-test/old-clip-tokenizer''' )
self.assertTrue(
context.exception.args[0].startswith(
'''The `backend_tokenizer` provided does not match the expected format.''' ) )
@require_ftfy
def a__ ( self ) -> Tuple:
super().test_tokenization_python_rust_equals()
def a__ ( self ) -> Tuple:
# CLIP always lower cases letters
pass
| 558
| 1
|
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Any = ["model.decoder.embed_positions.weights"]
def __magic_name__ ( __lowerCAmelCase : Tuple ) -> Tuple:
if "emb" in name:
__lowerCamelCase = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
__lowerCamelCase = name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
__lowerCamelCase = name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
__lowerCamelCase = name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
__lowerCamelCase = name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
__lowerCamelCase = name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
__lowerCamelCase = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
__lowerCamelCase = name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
__lowerCamelCase = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
__lowerCamelCase = name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
__lowerCamelCase = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name
def __magic_name__ ( __lowerCAmelCase : OrderedDict , __lowerCAmelCase : int ) -> Tuple[Dict, Dict]:
__lowerCamelCase = list(state_dict.keys() )
__lowerCamelCase = {}
for key in keys:
__lowerCamelCase = state_dict.pop(__lowerCAmelCase )
__lowerCamelCase = rename_keys(__lowerCAmelCase )
if "in_proj_weight" in key:
# split fused qkv proj
__lowerCamelCase = val[:hidden_size, :]
__lowerCamelCase = val[hidden_size : 2 * hidden_size, :]
__lowerCamelCase = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
__lowerCamelCase = val
else:
__lowerCamelCase = val
return state_dict, enc_dec_proj_state_dict
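# Hedged sketch of the fused-QKV split above (assumed sizes): fairseq stores
# the q/k/v projections as a single (3 * hidden, hidden) matrix that is
# sliced row-wise into three equal blocks.
_h = 4  # hypothetical hidden size
_fused = torch.zeros(3 * _h, _h)
_q, _k, _v = _fused[:_h, :], _fused[_h : 2 * _h, :], _fused[-_h:, :]
assert _q.shape == _k.shape == _v.shape == (_h, _h)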
def __magic_name__ ( __lowerCAmelCase : str ) -> MusicgenDecoderConfig:
if checkpoint == "small":
# default config values
__lowerCamelCase = 1024
__lowerCamelCase = 24
__lowerCamelCase = 16
elif checkpoint == "medium":
__lowerCamelCase = 1536
__lowerCamelCase = 48
__lowerCamelCase = 24
elif checkpoint == "large":
__lowerCamelCase = 2048
__lowerCamelCase = 48
__lowerCamelCase = 32
else:
raise ValueError(f'''Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.''' )
__lowerCamelCase = MusicgenDecoderConfig(
hidden_size=__lowerCAmelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , )
return config
@torch.no_grad()
def __magic_name__ ( __lowerCAmelCase : Dict , __lowerCAmelCase : List[Any]=None , __lowerCAmelCase : str=None , __lowerCAmelCase : Dict="cpu" ) -> int:
__lowerCamelCase = MusicGen.get_pretrained(__lowerCAmelCase , device=__lowerCAmelCase )
__lowerCamelCase = decoder_config_from_checkpoint(__lowerCAmelCase )
__lowerCamelCase = fairseq_model.lm.state_dict()
__lowerCamelCase , __lowerCamelCase = rename_state_dict(
__lowerCAmelCase , hidden_size=decoder_config.hidden_size )
__lowerCamelCase = TaEncoderModel.from_pretrained('''t5-base''' )
__lowerCamelCase = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
__lowerCamelCase = MusicgenForCausalLM(__lowerCAmelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
__lowerCamelCase , __lowerCamelCase = decoder.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
raise ValueError(f'''Missing key(s) in state_dict: {missing_keys}''' )
if len(__lowerCAmelCase ) > 0:
raise ValueError(f'''Unexpected key(s) in state_dict: {unexpected_keys}''' )
# init the composite model
__lowerCamelCase = MusicgenForConditionalGeneration(text_encoder=__lowerCAmelCase , audio_encoder=__lowerCAmelCase , decoder=__lowerCAmelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__lowerCAmelCase )
# check we can do a forward pass
__lowerCamelCase = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
__lowerCamelCase = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
__lowerCamelCase = model(input_ids=__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase ).logits
if logits.shape != (8, 1, 2048):
raise ValueError('''Incorrect shape for logits''' )
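    # (Sketch of the check above: batch_size 2 times the 4 MusicGen codebooks
    # gives the 8 decoder streams, and 2048 is the audio vocabulary size.)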
# now construct the processor
__lowerCamelCase = AutoTokenizer.from_pretrained('''t5-base''' )
__lowerCamelCase = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
__lowerCamelCase = MusicgenProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
# set the appropriate bos/pad token ids
__lowerCamelCase = 2048
__lowerCamelCase = 2048
# set other default generation config params
__lowerCamelCase = int(30 * audio_encoder.config.frame_rate )
__lowerCamelCase = True
__lowerCamelCase = 3.0
if pytorch_dump_folder is not None:
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
logger.info(f'''Saving model {checkpoint} to {pytorch_dump_folder}''' )
model.save_pretrained(__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
if repo_id:
logger.info(f'''Pushing model {checkpoint} to {repo_id}''' )
model.push_to_hub(__lowerCAmelCase )
processor.push_to_hub(__lowerCAmelCase )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint",
default="small",
type=str,
help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
"--pytorch_dump_folder",
required=True,
default=None,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
"--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 298
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __A ( self : Tuple ) -> Optional[int]:
__lowerCamelCase = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''' )
__lowerCamelCase = {
            '''input_ids''': tf.convert_to_tensor([[0, 26_46, 1_02_69, 83, 9_99_42, 2]] , dtype=tf.int32 ), # "My dog is cute"
            '''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32 ),
}
__lowerCamelCase = model(SCREAMING_SNAKE_CASE__ )['''last_hidden_state''']
__lowerCamelCase = tf.TensorShape((1, 6, 7_68) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE__ )
# compare the actual values for a slice.
__lowerCamelCase = tf.convert_to_tensor(
[
[
[0.0681762, 0.10894451, 0.06772504],
[-0.06423668, 0.02366615, 0.04329344],
[-0.06057295, 0.09974135, -0.00070584],
]
            ] , dtype=tf.float32 , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
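# Minimal illustration (assumed values) of the atol comparison used above:
import numpy as _np  # self-contained; `np` above is only imported when TF is available
_a = _np.array([0.1000, 0.2000])
_b = _np.array([0.10005, 0.19996])
assert _np.allclose(_a, _b, atol=1e-4) and not _np.allclose(_a, _b, atol=1e-5)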
| 298
| 1
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
lowerCamelCase : List[str] = logging.get_logger(__name__)
lowerCamelCase : Union[str, Any] = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
# See all MVP models at https://huggingface.co/models?filter=mvp
lowerCamelCase : Union[str, Any] = {
'''vocab_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''',
},
'''added_tokens.json''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''',
},
'''merges_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''',
},
}
lowerCamelCase : Any = {
'''RUCAIBox/mvp''': 1024,
}
class _UpperCamelCase (a_ ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ["""input_ids""", """attention_mask"""]
snake_case_ = MvpTokenizer
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase="replace" , __UpperCamelCase="<s>" , __UpperCamelCase="</s>" , __UpperCamelCase="</s>" , __UpperCamelCase="<s>" , __UpperCamelCase="<unk>" , __UpperCamelCase="<pad>" , __UpperCamelCase="<mask>" , __UpperCamelCase=False , __UpperCamelCase=True , **__UpperCamelCase , )-> Any:
super().__init__(
__UpperCamelCase , __UpperCamelCase , tokenizer_file=__UpperCamelCase , errors=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , mask_token=__UpperCamelCase , add_prefix_space=__UpperCamelCase , trim_offsets=__UpperCamelCase , **__UpperCamelCase , )
__lowerCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
__lowerCAmelCase = getattr(__UpperCamelCase , pre_tok_state.pop("type" ) )
__lowerCAmelCase = add_prefix_space
__lowerCAmelCase = pre_tok_class(**__UpperCamelCase )
__lowerCAmelCase = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
__lowerCAmelCase = "post_processor"
__lowerCAmelCase = getattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
if tokenizer_component_instance:
__lowerCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
__lowerCAmelCase = tuple(state["sep"] )
if "cls" in state:
__lowerCAmelCase = tuple(state["cls"] )
__lowerCAmelCase = False
if state.get("add_prefix_space" , __UpperCamelCase ) != add_prefix_space:
__lowerCAmelCase = add_prefix_space
__lowerCAmelCase = True
if state.get("trim_offsets" , __UpperCamelCase ) != trim_offsets:
__lowerCAmelCase = trim_offsets
__lowerCAmelCase = True
if changes_to_apply:
__lowerCAmelCase = getattr(__UpperCamelCase , state.pop("type" ) )
__lowerCAmelCase = component_class(**__UpperCamelCase )
setattr(self.backend_tokenizer , __UpperCamelCase , __UpperCamelCase )
@property
def __UpperCAmelCase ( self )-> str:
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCAmelCase ( self , __UpperCamelCase )-> Dict:
__lowerCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else value
__lowerCAmelCase = value
def __UpperCAmelCase ( self , *__UpperCamelCase , **__UpperCamelCase )-> BatchEncoding:
__lowerCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._batch_encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def __UpperCAmelCase ( self , *__UpperCamelCase , **__UpperCamelCase )-> BatchEncoding:
__lowerCAmelCase = kwargs.get("is_split_into_words" , __UpperCamelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs." )
return super()._encode_plus(*__UpperCamelCase , **__UpperCamelCase )
def __UpperCAmelCase ( self , __UpperCamelCase , __UpperCamelCase = None )-> Tuple[str]:
__lowerCAmelCase = self._tokenizer.model.save(__UpperCamelCase , name=__UpperCamelCase )
return tuple(__UpperCamelCase )
    def __UpperCAmelCase ( self , token_ids_a , token_ids_b=None )-> Optional[Any]:
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return output
        return output + [self.eos_token_id] + token_ids_b + [self.eos_token_id]
    def __UpperCAmelCase ( self , token_ids_a , token_ids_b = None )-> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep ) * [0]
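# (Sketch of the two methods above: a single sequence A is formatted as
# [bos] + A + [eos]; a pair (A, B) as [bos] + A + [eos] + [eos] + B + [eos],
# the BART-style layout, with all-zero token type ids in both cases.)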
| 290
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _UpperCamelCase (unittest.TestCase ):
def __UpperCAmelCase ( self )-> Optional[Any]:
__lowerCAmelCase = tempfile.mkdtemp()
# fmt: off
__lowerCAmelCase = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
# fmt: on
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
__lowerCAmelCase = {
"do_resize": True,
"size": {"height": 1_8, "width": 1_8},
"do_normalize": True,
"image_mean": [0.5, 0.5, 0.5],
"image_std": [0.5, 0.5, 0.5],
}
__lowerCAmelCase = os.path.join(self.tmpdirname , __UpperCamelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__UpperCamelCase , __UpperCamelCase )
def __UpperCAmelCase ( self , **__UpperCamelCase )-> Optional[Any]:
return BertTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def __UpperCAmelCase ( self , **__UpperCamelCase )-> Any:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def __UpperCAmelCase ( self )-> Any:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self )-> int:
        __lowerCAmelCase = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uint8 )]
__lowerCAmelCase = [Image.fromarray(np.moveaxis(__UpperCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
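    # (Sketch: np.moveaxis above turns the channel-first (3, 30, 400) array
    # into the channel-last (30, 400, 3) layout that Image.fromarray expects.)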
def __UpperCAmelCase ( self )-> Optional[Any]:
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCamelCase )
def __UpperCAmelCase ( self )-> str:
__lowerCAmelCase = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__lowerCAmelCase = self.get_image_processor(do_normalize=__UpperCamelCase , padding_value=1.0 )
__lowerCAmelCase = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCamelCase )
def __UpperCAmelCase ( self )-> Union[str, Any]:
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = image_processor(__UpperCamelCase , return_tensors="np" )
__lowerCAmelCase = processor(images=__UpperCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase ( self )-> Dict:
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__lowerCAmelCase = "lower newer"
__lowerCAmelCase = processor(text=__UpperCamelCase )
__lowerCAmelCase = tokenizer(__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self )-> List[str]:
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__lowerCAmelCase = "lower newer"
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=__UpperCamelCase , images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with self.assertRaises(__UpperCamelCase ):
processor()
def __UpperCAmelCase ( self )-> Optional[int]:
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCAmelCase = processor.batch_decode(__UpperCamelCase )
__lowerCAmelCase = tokenizer.batch_decode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def __UpperCAmelCase ( self )-> Any:
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = VisionTextDualEncoderProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
__lowerCAmelCase = "lower newer"
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = processor(text=__UpperCamelCase , images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 290
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowercase__ : List[Any] = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[Any] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : str = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
lowercase__ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
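# (Sketch of the lazy-import pattern above: _import_structure only lists
# symbol names; _LazyModule resolves them on first attribute access, so the
# heavy torch / tokenizers imports are deferred until a symbol is used.)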
| 98
|
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class _lowercase ( unittest.TestCase ):
def lowerCAmelCase__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowerCAmelCase__ ( self ):
__magic_name__ , __magic_name__ = FlaxStableDiffusionPipeline.from_pretrained(
            '''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloat16 , )
__magic_name__ = '''A painting of a squirrel eating a burger'''
__magic_name__ = jax.device_count()
__magic_name__ = num_samples * [prompt]
__magic_name__ = sd_pipe.prepare_inputs(UpperCamelCase_ )
__magic_name__ = replicate(UpperCamelCase_ )
__magic_name__ = shard(UpperCamelCase_ )
__magic_name__ = jax.random.PRNGKey(0 )
__magic_name__ = jax.random.split(UpperCamelCase_ , jax.device_count() )
__magic_name__ = sd_pipe(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , num_inference_steps=25 , jit=UpperCamelCase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__magic_name__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__magic_name__ = images[0, 253:256, 253:256, -1]
__magic_name__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__magic_name__ = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self ):
__magic_name__ = '''stabilityai/stable-diffusion-2'''
__magic_name__ , __magic_name__ = FlaxDPMSolverMultistepScheduler.from_pretrained(UpperCamelCase_ , subfolder='''scheduler''' )
__magic_name__ , __magic_name__ = FlaxStableDiffusionPipeline.from_pretrained(
            UpperCamelCase_ , scheduler=UpperCamelCase_ , revision='''bf16''' , dtype=jnp.bfloat16 , )
__magic_name__ = scheduler_params
__magic_name__ = '''A painting of a squirrel eating a burger'''
__magic_name__ = jax.device_count()
__magic_name__ = num_samples * [prompt]
__magic_name__ = sd_pipe.prepare_inputs(UpperCamelCase_ )
__magic_name__ = replicate(UpperCamelCase_ )
__magic_name__ = shard(UpperCamelCase_ )
__magic_name__ = jax.random.PRNGKey(0 )
__magic_name__ = jax.random.split(UpperCamelCase_ , jax.device_count() )
__magic_name__ = sd_pipe(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , num_inference_steps=25 , jit=UpperCamelCase_ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__magic_name__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__magic_name__ = images[0, 253:256, 253:256, -1]
__magic_name__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__magic_name__ = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
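# (Sketch of the replicate/shard pattern above: parameters are copied to every
# device with replicate(), while shard() reshapes inputs from
# (num_devices * batch, ...) to (num_devices, batch, ...) so the jitted
# pipeline runs one slice per device; outputs are flattened back by the
# images.reshape(...) call.)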
| 490
| 0
|
import numpy as np
__lowerCamelCase : Union[str, Any] = [
["""a""", """b""", """c""", """d""", """e"""],
["""f""", """g""", """h""", """i""", """k"""],
["""l""", """m""", """n""", """o""", """p"""],
["""q""", """r""", """s""", """t""", """u"""],
["""v""", """w""", """x""", """y""", """z"""],
]
class A__ :
def __init__( self ):
'''simple docstring'''
UpperCamelCase : Dict = np.array(A_ )
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
        UpperCamelCase , UpperCamelCase = np.where(letter == self.SQUARE )
UpperCamelCase : Union[str, Any] = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def __UpperCamelCase( self , A_ , A_ ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.SQUARE[indexa - 1, indexa - 1]
return letter
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = message.lower()
UpperCamelCase : int = message.replace(" " , "" )
UpperCamelCase : Optional[int] = message.replace("j" , "i" )
UpperCamelCase : int = np.empty((2, len(A_ )) )
for letter_index in range(len(A_ ) ):
UpperCamelCase : Optional[int] = self.letter_to_numbers(message[letter_index] )
UpperCamelCase : Optional[int] = numbers[0]
UpperCamelCase : List[Any] = numbers[1]
UpperCamelCase : Any = first_step.reshape(2 * len(A_ ) )
UpperCamelCase : List[Any] = ""
for numbers_index in range(len(A_ ) ):
UpperCamelCase : int = int(second_step[numbers_index * 2] )
UpperCamelCase : int = int(second_step[(numbers_index * 2) + 1] )
UpperCamelCase : List[Any] = self.numbers_to_letter(A_ , A_ )
UpperCamelCase : str = encoded_message + letter
return encoded_message
def __UpperCamelCase( self , A_ ):
'''simple docstring'''
UpperCamelCase : List[Any] = message.lower()
message.replace(" " , "" )
UpperCamelCase : Any = np.empty(2 * len(A_ ) )
for letter_index in range(len(A_ ) ):
UpperCamelCase : Tuple = self.letter_to_numbers(message[letter_index] )
UpperCamelCase : Optional[int] = numbers[0]
UpperCamelCase : List[str] = numbers[1]
UpperCamelCase : List[str] = first_step.reshape((2, len(A_ )) )
UpperCamelCase : int = ""
for numbers_index in range(len(A_ ) ):
UpperCamelCase : str = int(second_step[0, numbers_index] )
UpperCamelCase : Dict = int(second_step[1, numbers_index] )
UpperCamelCase : str = self.numbers_to_letter(A_ , A_ )
UpperCamelCase : Optional[int] = decoded_message + letter
return decoded_message
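# (Sketch of the scheme above: each letter maps to 1-based (row, column)
# coordinates in the 5x5 square, with 'j' folded into 'i'. Encoding collects
# all row digits, then all column digits, and re-reads consecutive digit
# pairs back through the square; decoding inverts that reshape.)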
| 38
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( __snake_case , unittest.TestCase ):
_UpperCAmelCase :str = KandinskyVaaPipeline
_UpperCAmelCase :str = [
'image_embeds',
'negative_image_embeds',
]
_UpperCAmelCase :str = ['image_embeds', 'negative_image_embeds']
_UpperCAmelCase :List[str] = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_UpperCAmelCase :List[str] = False
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return 32
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return 32
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self.time_input_dim
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return 100
@property
def __UpperCamelCase( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase : List[str] = {
"in_channels": 4,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCamelCase : Dict = UNetaDConditionModel(**A_ )
return model
@property
def __UpperCamelCase( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __UpperCamelCase( self ):
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = self.dummy_unet
UpperCamelCase : Optional[Any] = self.dummy_movq
UpperCamelCase : Dict = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule="linear" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , )
UpperCamelCase : Tuple = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def __UpperCamelCase( self , A_ , A_=0 ):
'''simple docstring'''
UpperCamelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A_ ) ).to(A_ )
UpperCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
A_ )
if str(A_ ).startswith("mps" ):
UpperCamelCase : Optional[Any] = torch.manual_seed(A_ )
else:
UpperCamelCase : List[Any] = torch.Generator(device=A_ ).manual_seed(A_ )
UpperCamelCase : Optional[int] = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
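    # (Note: the device-seeded generator above makes the 2-step dummy
    # diffusion run deterministic, which is what allows the tight
    # expected_slice comparisons in the tests below.)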
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[Any] = "cpu"
UpperCamelCase : List[str] = self.get_dummy_components()
UpperCamelCase : Tuple = self.pipeline_class(**A_ )
UpperCamelCase : List[str] = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
UpperCamelCase : Dict = pipe(**self.get_dummy_inputs(A_ ) )
UpperCamelCase : Optional[int] = output.images
UpperCamelCase : int = pipe(
**self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
UpperCamelCase : Tuple = image[0, -3:, -3:, -1]
UpperCamelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCamelCase : int = np.array(
[0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def __UpperCamelCase( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
UpperCamelCase : Dict = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(A_ )
UpperCamelCase : Dict = KandinskyVaaPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
UpperCamelCase : Tuple = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
UpperCamelCase : str = "red cat, 4k photo"
UpperCamelCase : str = torch.Generator(device="cuda" ).manual_seed(0 )
        UpperCamelCase , UpperCamelCase = pipe_prior(
A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCamelCase : int = torch.Generator(device="cuda" ).manual_seed(0 )
UpperCamelCase : Tuple = pipeline(
image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=100 , output_type="np" , )
UpperCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(A_ , A_ )
| 38
| 1
|
'''simple docstring'''
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for ``year`` using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
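# Worked check of the formulas above for year 2023: metonic_cycle = 9,
# leap_day_inhibits = 20, lunar_orbit_correction = 6, secular_moon_shift = 24.0,
# so days_to_add = (19 * 9 + 24.0) % 30 = 15.0 and
# days_from_phm_to_sunday = (6 + 0 + 90 + 5.0) % 7 = 3.0, giving
# March 22 + 18 days = April 9, 2023, the actual Easter date that year.
assert gauss_easter(2023) == datetime(2023, 4, 9)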
if __name__ == "__main__":
for year in (1994, 2000, 2010, 2021, 2023):
UpperCAmelCase_ : Any = 'will be' if year > datetime.now().year else 'was'
print(f'''Easter in {year} {tense} {gauss_easter(year)}''')
| 44
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def UpperCAmelCase__ (lowerCAmelCase_=None ):
'''simple docstring'''
if subparsers is not None:
__SCREAMING_SNAKE_CASE = subparsers.add_parser("env" )
else:
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser("Accelerate env command" )
parser.add_argument(
"--config_file" , default=lowerCAmelCase_ , help="The config file to use for the default values in the launching script." )
if subparsers is not None:
parser.set_defaults(func=lowerCAmelCase_ )
return parser
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = torch.__version__
__SCREAMING_SNAKE_CASE = torch.cuda.is_available()
__SCREAMING_SNAKE_CASE = is_xpu_available()
__SCREAMING_SNAKE_CASE = is_npu_available()
__SCREAMING_SNAKE_CASE = "Not found"
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = load_config_from_file(args.config_file ).to_dict()
__SCREAMING_SNAKE_CASE = {
"`Accelerate` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Numpy version": np.__version__,
"PyTorch version (GPU?)": f"""{pt_version} ({pt_cuda_available})""",
"PyTorch XPU available": str(lowerCAmelCase_ ),
"PyTorch NPU available": str(lowerCAmelCase_ ),
"System RAM": f"""{psutil.virtual_memory().total / 1024 ** 3:.2f} GB""",
}
if pt_cuda_available:
__SCREAMING_SNAKE_CASE = torch.cuda.get_device_name()
print("\nCopy-and-paste the text below in your GitHub issue\n" )
print("\n".join([f"""- {prop}: {val}""" for prop, val in info.items()] ) )
print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" )
__SCREAMING_SNAKE_CASE = (
"\n".join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ )
else f"""\t{accelerate_config}"""
)
print(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE = accelerate_config
return info
def UpperCAmelCase__ ():
'''simple docstring'''
__SCREAMING_SNAKE_CASE = env_command_parser()
__SCREAMING_SNAKE_CASE = parser.parse_args()
env_command(lowerCAmelCase_ )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 682
| 0
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def UpperCamelCase ( _A : str )-> str:
"""simple docstring"""
A__ = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(_A , _A )
def UpperCamelCase ( _A : Dict )-> str:
"""simple docstring"""
A__ , A__ = emb.weight.shape
A__ = nn.Linear(_A , _A , bias=_A )
A__ = emb.weight.data
return lin_layer
def UpperCamelCase ( _A : Optional[Any] , _A : List[str]=None )-> Optional[int]:
"""simple docstring"""
A__ = {}
for old_key in state_dict.keys():
A__ = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
A__ = key.replace("moe_layer.experts.0" , f"""ffn.experts.expert_{expert_idx}""" )
else:
A__ = key.replace("moe_layer.experts." , "ffn.experts.expert_" )
if "gate" in key:
A__ = key.replace(".moe_layer.gate.wg" , ".ffn.router.classifier" )
if "fc2" and "experts" not in key:
A__ = key.replace(".fc2." , ".ffn.fc2." )
if "fc1" and "experts" not in key:
A__ = key.replace(".fc1." , ".ffn.fc1." )
if ".encoder_attn." in key:
A__ = key.replace(".encoder_attn." , ".cross_attention." )
if "encoder_attn_layer_norm" in key:
A__ = key.replace("encoder_attn_layer_norm" , "cross_attention_layer_norm" )
if "final_layer_norm" in key:
A__ = key.replace("final_layer_norm" , "ff_layer_norm" )
A__ = state_dict[old_key]
return new_dict
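# Example of the renaming above (hypothetical key, expert_idx=3):
# 'layers.0.moe_layer.experts.0.fc1.weight'
#   -> 'layers.0.ffn.experts.expert_3.fc1.weight'
# (the fc1/fc2 rewrites are skipped for expert weights, which already
# contain 'experts' in the key).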
def UpperCamelCase ( _A : int , _A : str , _A : Union[str, Any] , _A : Any , _A : str = WEIGHTS_NAME )-> Optional[int]:
"""simple docstring"""
A__ = []
A__ = 0
os.makedirs(_A , exist_ok=_A )
for expert in range(_A ):
A__ = switch_checkpoint_path + f"""-rank-{expert}.pt"""
if os.path.isfile(_A ):
A__ = torch.load(_A )["model"]
remove_ignore_keys_(_A )
A__ = rename_fairseq_keys(_A , _A )
A__ = os.path.join(
_A , weights_name.replace(".bin" , f"""-{len(_A )+1:05d}-of-???.bin""" ) )
torch.save(_A , _A )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_A )[0]].dtype )
# Add the last block
A__ = os.path.join(_A , weights_name.replace(".bin" , f"""-{len(_A )+1:05d}-of-???.bin""" ) )
A__ = torch.load(switch_checkpoint_path + "-shared.pt" )["model"]
remove_ignore_keys_(_A )
A__ = rename_fairseq_keys(_A , _A )
A__ = shared_weights["decoder.embed_tokens.weight"]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_A ) == 1:
A__ = os.path.join(_A , _A )
torch.save(_A , _A )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_A , _A )
# Otherwise, let's build the index
A__ = {}
for idx, shard in enumerate(_A ):
A__ = weights_name.replace(".bin" , f"""-{idx+1:05d}-of-{len(_A ):05d}.bin""" )
A__ = os.path.join(_A , weights_name.replace(".bin" , f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(_A , os.path.join(_A , _A ) )
for key in shard:
A__ = shard_file
# Add the metadata
A__ = {"total_size": total_size}
A__ = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(_A , _A ) , "w" , encoding="utf-8" ) as f:
A__ = json.dumps(_A , indent=2 , sort_keys=_A ) + "\n"
f.write(_A )
return metadata, index
if __name__ == "__main__":
UpperCAmelCase_ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
UpperCAmelCase_ : Any = parser.parse_args()
    UpperCAmelCase_ , UpperCAmelCase_ = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
UpperCAmelCase_ : Dict = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
UpperCAmelCase_ : Dict = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 232
|
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : int = logging.get_logger(__name__)
def UpperCamelCase ( _A : List[str] )-> List[str]:
"""simple docstring"""
A__ = torch.load(_A , map_location="cpu" )
if "model" in sd.keys():
A__ = torch.load(_A , map_location="cpu" )["model"]
# pop unnecessary weights
A__ = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(_A )
A__ = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
A__ = sd.pop(_A )
A__ = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
A__ = sd[key]
# We split QKV in separate Q,K,V
A__ = key.replace(".qkv_proj." , ".q_proj." )
A__ = key.replace(".qkv_proj." , ".k_proj." )
A__ = key.replace(".qkv_proj." , ".v_proj." )
A__ = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
A__ , A__ , A__ = torch.split(_A , depth // 3 , dim=0 )
A__ = q
A__ = k
A__ = v
del sd[key]
return sd
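# Sketch of the row-wise split above (assumed sizes): a fused (3 * d, d)
# QKV weight yields three (d, d) blocks with torch.split along dim 0.
_q0, _k0, _v0 = torch.split(torch.zeros(12, 4), 12 // 3, dim=0)
assert _q0.shape == _k0.shape == _v0.shape == (4, 4)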
@torch.no_grad()
def UpperCamelCase ( _A : Any , _A : Dict , _A : Union[str, Any]=None )-> Dict:
"""simple docstring"""
A__ = load_checkpoint(_A )
if config is not None:
A__ = OPTConfig.from_pretrained(_A )
else:
A__ = OPTConfig()
A__ = OPTModel(_A ).half().eval()
model.load_state_dict(_A )
# Check results
Path(_A ).mkdir(exist_ok=_A )
model.save_pretrained(_A )
if __name__ == "__main__":
UpperCAmelCase_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 232
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[Any] , a_ : List[Any] , a_ : Optional[Any]=7 , a_ : List[Any]=3 , a_ : Optional[Any]=18 , a_ : List[Any]=30 , a_ : Any=4_00 , a_ : List[str]=True , a_ : int=None , a_ : Union[str, Any]=True , ):
lowerCAmelCase_ : str = size if size is not None else {'height': 18, 'width': 18}
lowerCAmelCase_ : Dict = parent
lowerCAmelCase_ : int = batch_size
lowerCAmelCase_ : Any = num_channels
lowerCAmelCase_ : str = image_size
lowerCAmelCase_ : Union[str, Any] = min_resolution
lowerCAmelCase_ : int = max_resolution
lowerCAmelCase_ : Union[str, Any] = do_resize
lowerCAmelCase_ : Optional[int] = size
lowerCAmelCase_ : Optional[Any] = apply_ocr
def lowerCamelCase ( self : Optional[int] ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __lowerCamelCase ( __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
a_ : Optional[Any] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : Optional[Any] = LayoutLMvaImageProcessingTester(self )
@property
def lowerCamelCase ( self : str ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase ( self : Tuple ):
lowerCAmelCase_ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_ , "do_resize" ) )
self.assertTrue(hasattr(a_ , "size" ) )
self.assertTrue(hasattr(a_ , "apply_ocr" ) )
def lowerCamelCase ( self : Dict ):
lowerCAmelCase_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
lowerCAmelCase_ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def lowerCamelCase ( self : List[Any] ):
pass
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase_ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , Image.Image )
# Test not batched input
lowerCAmelCase_ : Any = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , a_ )
self.assertIsInstance(encoding.boxes , a_ )
# Test batched
lowerCAmelCase_ : Optional[Any] = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase_ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a_ , numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_ , np.ndarray )
# Test not batched input
lowerCAmelCase_ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
lowerCAmelCase_ : Optional[Any] = image_processing(a_ , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMvaImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
lowerCAmelCase_ : Optional[Any] = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
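        expected_words = lowerCAmelCase_  # keep the OCR words under a descriptive name; the next statement reuses the same variable for the boxes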
lowerCAmelCase_ : Optional[int] = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 
6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
        # fmt: on
        expected_boxes = lowerCAmelCase_  # the OCR boxes assigned just above

        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMvaImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
if __name__ == "__main__":
    # read original image
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative
    neg = convert_to_negative(img)

    # show result image
    imshow("negative of original image", img)

    waitKey(0)
    destroyAllWindows()
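
# Quick sanity check of the inversion above (illustrative sketch, pure numpy, no image file needed):
# >>> import numpy as np
# >>> px = np.array([10, 20, 30], dtype=np.uint8)
# >>> ([255, 255, 255] - px).tolist()
# [245, 235, 225]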
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
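
# Worked example (illustrative input, not from a real CI run): for a pytest stats line such as
# "1 failed, 2 passed in 6.2s", handle_test_results returns (1, 2, "6.2s"); when the line is short
# enough to be wrapped as "== 1 failed, 2 passed in 6.2s ==", the trailing "==" is detected and
# the time is read from the second-to-last token instead.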
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
    def header(self) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f'🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.',
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f'There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in'
f' {self.time}.'
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f'https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
@property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }
@property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )
        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        test_predictions.append(torch.cat(fold_predictions, dim=0))
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    # Use accelerator.print to print only on the main process.
    accelerator.print("Average test metrics from all folds:", test_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
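
# Illustrative sketch (standalone, sklearn + numpy only; the toy labels are made up for the
# example): how StratifiedKFold produces the (train_idxs, valid_idxs) pairs consumed by
# get_fold_dataloaders above.
# >>> import numpy as np
# >>> from sklearn.model_selection import StratifiedKFold
# >>> y = np.array([0, 0, 0, 1, 1, 1])
# >>> for train_idxs, valid_idxs in StratifiedKFold(n_splits=3).split(np.zeros(len(y)), y):
# ...     print(train_idxs, valid_idxs)  # each fold keeps the 0/1 label ratio balanced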
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1114112
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0

CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )
        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)
    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string (i.e. perform character splitting)."""
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (i.e. a Unicode character) to an id (i.e. its integer codepoint value)."""
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        """Converts a Unicode codepoint (integer) to a token (str); special codepoints get a readable name."""
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        # CanineTokenizer has no vocab file
        return ()
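
# Illustrative sketch of the character-level scheme above: every token is a single character
# and its id is simply the Unicode codepoint (ord), decoded back with chr.
# >>> [ord(c) for c in "héllo"]
# [104, 233, 108, 108, 111]
# >>> "".join(chr(i) for i in [104, 233, 108, 108, 111])
# 'héllo'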
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class AlignProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = AlignProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = AlignProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = AlignProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, EfficientNetImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, EfficientNetImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = AlignProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, EfficientNetImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str, padding="max_length", max_length=64)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = AlignProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    return data[1:] + data[0]


def xor(a, b):
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
    key = input("Enter 10 bit key: ")
    message = input("Enter 8 bit message: ")

    p8_table = [6, 3, 7, 4, 8, 5, 10, 9]
    p10_table = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
    p4_table = [2, 4, 3, 1]
    IP = [2, 6, 3, 1, 4, 8, 5, 7]
    IP_inv = [4, 1, 3, 5, 7, 2, 8, 6]
    expansion = [4, 1, 2, 3, 2, 3, 4, 1]
    s0 = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
    s1 = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]

    # key generation
    temp = apply_table(key, p10_table)
    left = temp[:5]
    right = temp[5:]
    left = left_shift(left)
    right = left_shift(right)
    key1 = apply_table(left + right, p8_table)
    left = left_shift(left)
    right = left_shift(right)
    left = left_shift(left)
    right = left_shift(right)
    key2 = apply_table(left + right, p8_table)

    # encryption
    temp = apply_table(message, IP)
    temp = function(expansion, s0, s1, key1, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key2, temp)
    CT = apply_table(temp, IP_inv)
    print("Cipher text is:", CT)

    # decryption
    temp = apply_table(CT, IP)
    temp = function(expansion, s0, s1, key2, temp)
    temp = temp[4:] + temp[:4]
    temp = function(expansion, s0, s1, key1, temp)
    PT = apply_table(temp, IP_inv)
    print("Plain text after decypting is:", PT)
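
# Worked micro-examples for the primitives above (values computed by hand, illustrative only):
# apply_table("1010", [2, 4, 3, 1])  -> "0011"   (output bit k is input bit table[k] - 1)
# left_shift("10100")                -> "01001"  (circular left shift by one position)
# xor("1010", "0110")                -> "1100"   (per-position difference of the two bit strings)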
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]

        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
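
# Worked example of the feature-size bookkeeping above (illustrative values, not defaults):
# with embedding_dimension=[2, 3], num_dynamic_real_features=1, num_time_features=4,
# num_static_real_features=0 and input_size=1, _number_of_features is
# (2 + 3) + 1 + 4 + 0 + 1 * 2 = 12, which is then added to input_size * len(lags_sequence)
# to obtain the model's feature_size.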
"""simple docstring"""
from __future__ import annotations
import requests
def A ( snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"""
return requests.get(snake_case__ ).json()
def A ( snake_case__ = 10 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = """https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"""
SCREAMING_SNAKE_CASE__ = requests.get(snake_case__ ).json()[:max_stories]
return [get_hackernews_story(snake_case__ ) for story_id in story_ids]
def A ( snake_case__ = 10 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = hackernews_top_stories(snake_case__ )
return "\n".join("""* [{title}]({url})""".format(**snake_case__ ) for story in stories )
if __name__ == "__main__":
print(hackernews_top_stories_as_markdown())
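
# Illustrative sketch (no network access; the story values are made up for the example):
# the markdown line format used by hackernews_top_stories_as_markdown above.
# >>> "* [{title}]({url})".format(**{"title": "Example story", "url": "https://example.com"})
# '* [Example story](https://example.com)'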
"""simple docstring"""
def A ( snake_case__ , snake_case__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = len(snake_case__ )
SCREAMING_SNAKE_CASE__ = len(snake_case__ )
SCREAMING_SNAKE_CASE__ = (
first_str_length if first_str_length > second_str_length else second_str_length
)
SCREAMING_SNAKE_CASE__ = []
for char_count in range(snake_case__ ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(snake_case__ )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("T")


def get_parent_position(position: int) -> int:
    # heap helper: index of the parent of the node at `position`
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    # heap helper: index of the left child of the node at `position`
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    # heap helper: index of the right child of the node at `position`
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Minimum priority queue backed by a binary heap plus a position map."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements
    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        # Check if the priority queue is empty
        return self.elements == 0
    def push(self, elem: T, weight: int) -> None:
        # Add an element with a given priority to the queue
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)
    def extract_min(self) -> T:
        # Remove and return the element with the lowest weight (highest priority)
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem
    def update_key(self, elem: T, weight: int) -> None:
        # Update the weight of the given element
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)
    def _bubble_up(self, elem: T) -> None:
        # Move a node up the heap until the heap property is restored
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        _, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None
    def _bubble_down(self, elem: T) -> None:
        # Move a node down the heap until the heap property is restored
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None
    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        # Swap two nodes in the heap and keep the position map in sync
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Undirected weighted graph (nodes and weighted edges)."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes
    def add_node(self, node: T) -> None:
        # Add a node in the graph if it is not in the graph
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # Add an edge between 2 nodes in the graph
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
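
# Minimal usage sketch (illustrative; kept commented out so the module has no import-time
# side effects; the toy edge weights are made up for the example):
# graph = GraphUndirectedWeighted[str]()
# graph.add_edge("a", "b", 3)
# graph.add_edge("b", "c", 10)
# graph.add_edge("a", "c", 15)
# dist, parent = prims_algo(graph)
# print(parent)  # maps each node to its predecessor in the resulting spanning tree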
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , width=768 , height=512 , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
lowercase__ = output.images
assert image.shape == (1, 512, 768, 3)
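
# A hypothetical end-to-end sketch of the pipeline exercised by these tests
# (checkpoint and scales taken from the integration tests above; the CUDA
# device placement is an assumption):
#
#     import torch
#     from diffusers import StableDiffusionSAGPipeline
#
#     sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
#     sag_pipe = sag_pipe.to("cuda")
#     image = sag_pipe("a photo of an astronaut", guidance_scale=7.5, sag_scale=1.0).images[0]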
"""Project Euler 112: find the least number for which the proportion of
bouncy numbers first reaches the given percentage (default 99%)."""


def check_bouncy(num: int) -> bool:
    """A number is bouncy if its digits are neither monotonically increasing
    nor monotonically decreasing."""
    if not isinstance(num, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(num)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number for which the proportion of bouncy numbers
    among 1..n is at least `percent`."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"{solution(99)}")
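
# Quick sanity checks against values stated in the Project Euler problem
# (the 50% threshold is first reached at 538):
assert check_bouncy(101)       # digits neither increasing nor decreasing
assert not check_bouncy(1234)  # increasing
assert not check_bouncy(4321)  # decreasing
assert solution(50) == 538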
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-50-one-to-many-mmt''': (
'''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/mbart-large-50-one-to-many-mmt''': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class MBart50Tokenizer(PreTrainedTokenizer):
    """Construct a MBart50 tokenizer. Based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) in an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting. prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting. prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
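
# A hypothetical usage sketch of the tokenizer above (checkpoint from the
# pretrained map; language codes from FAIRSEQ_LANGUAGE_CODES):
#
#     from transformers import MBart50Tokenizer
#
#     tokenizer = MBart50Tokenizer.from_pretrained(
#         "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#     )
#     batch = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#     # input_ids = [en_XX_code, ...tokens..., </s>] per set_src_lang_special_tokens above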
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
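
# A hypothetical usage sketch (with the default apply_ocr=True image processor,
# words and boxes come from built-in OCR, which needs pytesseract at runtime;
# the image path is a placeholder):
#
#     from PIL import Image
#     from transformers import LayoutLMv3Processor
#
#     processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#     image = Image.open("document.png").convert("RGB")
#     encoding = processor(image, return_tensors="pt")
#     # keys: input_ids, attention_mask, bbox, pixel_values (see model_input_names)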
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Any = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_: Optional[Any] = self.dummy_cond_unet_upscale
UpperCAmelCase_: Dict = DDPMScheduler()
UpperCAmelCase_: Optional[int] = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_: Union[str, Any] = self.dummy_vae
UpperCAmelCase_: Optional[int] = self.dummy_text_encoder
UpperCAmelCase_: str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
# make sure here that pndm scheduler skips prk
UpperCAmelCase_: Optional[int] = StableDiffusionUpscalePipeline(
unet=A__ , low_res_scheduler=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , max_noise_level=350 , )
UpperCAmelCase_: Optional[int] = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
UpperCAmelCase_: Union[str, Any] = "A painting of a squirrel eating a burger"
UpperCAmelCase_: Any = torch.Generator(device=A__ ).manual_seed(0 )
UpperCAmelCase_: int = sd_pipe(
[prompt] , image=A__ , generator=A__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_: Optional[Any] = output.images
UpperCAmelCase_: List[str] = torch.Generator(device=A__ ).manual_seed(0 )
UpperCAmelCase_: List[Any] = sd_pipe(
[prompt] , image=A__ , generator=A__ , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , return_dict=A__ , )[0]
UpperCAmelCase_: List[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_: List[str] = image_from_tuple[0, -3:, -3:, -1]
UpperCAmelCase_: int = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
UpperCAmelCase_: List[str] = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_: Optional[Any] = self.dummy_cond_unet_upscale
UpperCAmelCase_: Union[str, Any] = DDPMScheduler()
UpperCAmelCase_: Optional[Any] = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_: Dict = self.dummy_vae
UpperCAmelCase_: Any = self.dummy_text_encoder
UpperCAmelCase_: Tuple = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
# make sure here that pndm scheduler skips prk
UpperCAmelCase_: str = StableDiffusionUpscalePipeline(
unet=A__ , low_res_scheduler=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , max_noise_level=350 , )
UpperCAmelCase_: int = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
UpperCAmelCase_: Any = "A painting of a squirrel eating a burger"
UpperCAmelCase_: Union[str, Any] = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_: Any = output.images
assert image.shape[0] == 2
UpperCAmelCase_: Any = torch.Generator(device=A__ ).manual_seed(0 )
UpperCAmelCase_: Any = sd_pipe(
[prompt] , image=A__ , generator=A__ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_: Dict = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: List[str] = self.dummy_cond_unet_upscale
UpperCAmelCase_: Dict = DDPMScheduler()
UpperCAmelCase_: int = DDIMScheduler(prediction_type="v_prediction" )
UpperCAmelCase_: Dict = self.dummy_vae
UpperCAmelCase_: Dict = self.dummy_text_encoder
UpperCAmelCase_: Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
# put models in fp16, except vae as it overflows in fp16
UpperCAmelCase_: List[str] = unet.half()
UpperCAmelCase_: Union[str, Any] = text_encoder.half()
# make sure here that pndm scheduler skips prk
UpperCAmelCase_: Optional[Any] = StableDiffusionUpscalePipeline(
unet=A__ , low_res_scheduler=A__ , scheduler=A__ , vae=A__ , text_encoder=A__ , tokenizer=A__ , max_noise_level=350 , )
UpperCAmelCase_: Optional[int] = sd_pipe.to(A__ )
sd_pipe.set_progress_bar_config(disable=A__ )
UpperCAmelCase_: Any = "A painting of a squirrel eating a burger"
UpperCAmelCase_: List[Any] = torch.manual_seed(0 )
UpperCAmelCase_: str = sd_pipe(
[prompt] , image=A__ , generator=A__ , num_inference_steps=2 , output_type="np" , ).images
UpperCAmelCase_: str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
def snake_case_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ):
"""simple docstring"""
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )
UpperCAmelCase_: Optional[Any] = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_: Optional[Any] = StableDiffusionUpscalePipeline.from_pretrained(A__ )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing()
UpperCAmelCase_: List[str] = "a cat sitting on a park bench"
UpperCAmelCase_: Any = torch.manual_seed(0 )
UpperCAmelCase_: Any = pipe(
prompt=A__ , image=A__ , generator=A__ , output_type="np" , )
UpperCAmelCase_: Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def snake_case_ ( self ):
"""simple docstring"""
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )
UpperCAmelCase_: Optional[int] = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_: Any = StableDiffusionUpscalePipeline.from_pretrained(
A__ , torch_dtype=torch.floataa , )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing()
UpperCAmelCase_: Any = "a cat sitting on a park bench"
UpperCAmelCase_: Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_: Optional[Any] = pipe(
prompt=A__ , image=A__ , generator=A__ , output_type="np" , )
UpperCAmelCase_: str = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def snake_case_ ( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
UpperCAmelCase_: Tuple = "stabilityai/stable-diffusion-x4-upscaler"
UpperCAmelCase_: Union[str, Any] = StableDiffusionUpscalePipeline.from_pretrained(
A__ , torch_dtype=torch.floataa , )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_: str = "a cat sitting on a park bench"
UpperCAmelCase_: Optional[int] = torch.manual_seed(0 )
UpperCAmelCase_: Union[str, Any] = pipe(
prompt=A__ , image=A__ , generator=A__ , num_inference_steps=5 , output_type="np" , )
UpperCAmelCase_: Any = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
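
# A hypothetical quick-use sketch mirroring the fp16 integration tests above
# (low_res_image would be a PIL image; the output is upscaled 4x as asserted):
#
#     import torch
#     from diffusers import StableDiffusionUpscalePipeline
#
#     pipe = StableDiffusionUpscalePipeline.from_pretrained(
#         "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
#     ).to("cuda")
#     upscaled = pipe(prompt="a cat sitting on a park bench", image=low_res_image).images[0]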
"""Convert ViT MAE checkpoints from the original repository."""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            # split the fused qkv projection into separate query/key/value tensors
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    image_processor = ViTMAEImageProcessor(size=config.image_size)

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowerCAmelCase : List[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
__lowerCAmelCase : int =parser.parse_args()
convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
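
# For example, rename_key maps timm-style MAE keys onto the HF ViTMAE layout:
#
#     assert rename_key("cls_token") == "vit.embeddings.cls_token"
#     assert rename_key("blocks.0.attn.proj.weight") == "vit.encoder.layer.0.attention.output.dense.weight"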
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase : Any =logging.get_logger(__name__)
__lowerCAmelCase : int ="https://openaipublic.azureedge.net/jukebox/models/"
__lowerCAmelCase : Any ={
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)")
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)")

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)")
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)"
    )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)")

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)

        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)

        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)

        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)

        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)

        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)

        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)

        # keep original key
        else:
            key = original_key

        key = replace_key(key)

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle missmatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match")
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", "wb").write(r.content)
    model_to_convert = MODEL_MAPPING[model_name.split("/")[-1]]

    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")["model"]

        new_dic = {}
for k in old_dic.keys():
if k.endswith(".b" ):
A__ = old_dic[k]
elif k.endswith(".w" ):
A__ = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
A__ = old_dic[k]
else:
A__ = old_dic[k]
A__ = "vqvae" if i == 0 else F"priors.{3 - i}"
A__ = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
A__ = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", "w") as txtfile:
        json.dump(mapping, txtfile)

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    return weight_dict
if __name__ == "__main__":
__lowerCAmelCase : Dict =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
__lowerCAmelCase : int =parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
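
# For example, replace_key remaps the prior's prime-state layer norm onto the
# HF encoder naming:
#
#     assert replace_key("prime_state_ln.weight") == "encoder.final_layer_norm.weight"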
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
def snake_case_ ( self , a__=1_5):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})"):
A__ = self.rust_tokenizer_class.from_pretrained(a__ , **a__)
# Simple input
A__ = '''This is a simple input'''
A__ = ['''This is a simple input 1''', '''This is a simple input 2''']
A__ = ('''This is a simple input''', '''This is a pair''')
A__ = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding='''max_length''')
# Simple input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding='''max_length''')
# Simple input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding='''max_length''' , )
# Pair input
self.assertRaises(a__ , tokenizer_r.encode , a__ , max_length=a__ , padding='''max_length''')
# Pair input
self.assertRaises(a__ , tokenizer_r.encode_plus , a__ , max_length=a__ , padding='''max_length''')
# Pair input
self.assertRaises(
a__ , tokenizer_r.batch_encode_plus , a__ , max_length=a__ , padding='''max_length''' , )
def snake_case_ ( self):
A__ = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''')
# Simple input
A__ = '''This is a simple input'''
A__ = ['''This is a simple input looooooooong''', '''This is a simple input''']
A__ = ('''This is a simple input''', '''This is a pair''')
A__ = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
A__ = tokenizer.pad_token_id
A__ = tokenizer(a__ , padding='''max_length''' , max_length=3_0 , return_tensors='''np''')
A__ = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors='''np''')
A__ = tokenizer(*a__ , padding='''max_length''' , max_length=6_0 , return_tensors='''np''')
A__ = tokenizer(a__ , padding=a__ , truncate=a__ , return_tensors='''np''')
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 3_0)
self.assertTrue(pad_token_id in out_s['''input_ids'''])
self.assertTrue(0 in out_s['''attention_mask'''])
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 3_3)
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0])
self.assertFalse(0 in out_sa['''attention_mask'''][0])
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1])
self.assertTrue(0 in out_sa['''attention_mask'''][1])
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 6_0)
self.assertTrue(pad_token_id in out_p['''input_ids'''])
self.assertTrue(0 in out_p['''attention_mask'''])
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 5_2)
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0])
self.assertFalse(0 in out_pa['''attention_mask'''][0])
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1])
self.assertTrue(0 in out_pa['''attention_mask'''][1])
def snake_case_ ( self):
A__ = '''$$$'''
A__ = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=a__ , add_bos_token=a__)
A__ = '''This is a simple input'''
A__ = ['''This is a simple input 1''', '''This is a simple input 2''']
A__ = tokenizer.bos_token_id
A__ = tokenizer(a__)
A__ = tokenizer(a__)
self.assertEqual(out_s.input_ids[0] , a__)
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids))
A__ = tokenizer.decode(out_s.input_ids)
A__ = tokenizer.batch_decode(out_sa.input_ids)
self.assertEqual(decode_s.split()[0] , a__)
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa))
    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)
def snake_case_ ( self):
pass
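
# A hypothetical sketch of the decode-time truncation exercised by the slow
# test above (everything from the first line matching a pattern onwards is
# dropped from the decoded string):
#
#     from transformers import CodeGenTokenizer
#
#     tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")
#     ids = tokenizer.encode("if len_a > len_b:\n    result = a\n# stray comment")
#     text = tokenizer.decode(ids, truncate_before_pattern=["^#"])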
import jax.numpy as jnp

from ...utils import logging
from ..t5.modeling_flax_t5 import FlaxT5EncoderModel, FlaxT5ForConditionalGeneration, FlaxT5Model
from .configuration_mt5 import MT5Config

logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


def shift_tokens_right(input_ids: jnp.array, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids


class FlaxMT5Model(FlaxT5Model):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5EncoderModel(FlaxT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config


class FlaxMT5ForConditionalGeneration(FlaxT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config
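
# A quick sanity check of shift_tokens_right (token ids are arbitrary; pad and
# decoder-start ids both set to 0 here):
#
#     import jax.numpy as jnp
#
#     ids = jnp.array([[5, 6, 7]])
#     shift_tokens_right(ids, pad_token_id=0, decoder_start_token_id=0)
#     # -> [[0 5 6]]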
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=24,
        num_hidden_layers=2,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class LiltModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            LiltModel,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": LiltModel,
            "question-answering": LiltForQuestionAnswering,
            "text-classification": LiltForSequenceClassification,
            "token-classification": LiltForTokenClassification,
            "zero-shot": LiltForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)

        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)

        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]],
            device=torch_device,
        )
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
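# Minimal usage sketch outside unittest (assumption: network access and the same
# imports as this test module). Mirrors the integration test above; the token ids
# and bounding boxes are the same illustrative placeholders.
def _lilt_usage_sketch():
    model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
    input_ids = torch.tensor([[1, 2]])  # two dummy token ids
    bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])  # one (x0, y0, x1, y1) box per token
    with torch.no_grad():
        outputs = model(input_ids=input_ids, bbox=bbox)
    return outputs.last_hidden_state.shape  # torch.Size([1, 2, 768])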
| 195
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 163
| 0
|
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
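# Usage sketch (values are illustrative; in practice the dataclass is usually
# populated from CLI flags via HfArgumentParser):
def _benchmark_args_sketch():
    args = BenchmarkArguments(models=["bert-base-cased"], batch_sizes=[1], sequence_lengths=[8])
    print(args.model_names)       # ['bert-base-cased']
    print(args.to_json_string())  # full configuration as indented JSON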
| 718
|
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False
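# Worked example (values chosen for illustration): 0 = open cell, 1 = blocked cell.
# For this 2x2 maze the only path is (0,0) -> (1,0) -> (1,1), so solve_maze prints
# [1, 0] and [1, 1] and returns True.
def _maze_example() -> bool:
    maze = [
        [0, 1],
        [0, 0],
    ]
    return solve_maze(maze)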
if __name__ == "__main__":
import doctest
doctest.testmod()
| 231
| 0
|
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Returns the first positive env value found in the `env_keys` list or the default."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
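# Usage sketch (the environment variable names here are illustrative):
def _env_parsing_example():
    os.environ["ACME_NUM_WORKERS"] = "4"
    os.environ["ACME_DEBUG"] = "true"
    workers = get_int_from_env(["ACME_NUM_WORKERS"], default=1)  # -> 4
    debug = parse_flag_from_env("ACME_DEBUG")                    # -> True
    return workers, debug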
| 682
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    # If no tests are collected, pytest exits with code 5, so make it a success instead.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 668
| 0
|
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 702
|
def hexagonal_numbers(length: int) -> list[int]:
    """Returns the first `length` hexagonal numbers, h(n) = n * (2n - 1)."""
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
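# Quick check of the closed form above: h(n) = n * (2n - 1) gives
# 0, 1, 6, 15, 28 for n = 0..4.
assert hexagonal_numbers(length=5) == [0, 1, 6, 15, 28]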
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 658
| 0
|
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        return tok(strang, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt

    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
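# Illustrative programmatic use of the packer (the checkpoint name and texts are
# placeholders; any seq2seq tokenizer works):
def _pack_examples_sketch():
    tok = AutoTokenizer.from_pretrained("sshleifer/tiny-mbart")  # placeholder tiny checkpoint
    src = ["a b c", "d e f", "g h i"]
    tgt = ["1", "2", "3"]
    # with a small max_tokens budget, neighbouring pairs are merged greedily
    return pack_examples(tok, src, tgt, max_tokens=20)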
if __name__ == "__main__":
packer_cli()
| 204
|
"""simple docstring"""
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
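# Spot check: primes up to 30, in ascending order.
assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]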
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 626
| 0
|
"""simple docstring"""
from __future__ import annotations
def p_series(nth_term, power):
    """Returns the P-Series 1, 1/2^p, 1/3^p, ..., 1/n^p as a list of strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
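# Example: the first four terms of the P-Series with p = 2.
assert p_series(4, 2) == ["1", "1 / 4", "1 / 9", "1 / 16"]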
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCamelCase = int(input("""Enter the last number (nth term) of the P-Series"""))
_UpperCamelCase = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
| 74
|
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
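# Smaller worked example: in the path graph 0 - 1 - 2 the middle vertex is the
# only articulation point, so this prints just "1".
_path_graph = {0: [1], 1: [0, 2], 2: [1]}
compute_ap(_path_graph)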
| 74
| 1
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray


class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv_in = nn.Conv(
            self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype
        )
        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(
                channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype
            )
            blocks.append(conv2)
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel, kernel_size=(1, 1), padding="VALID",
            kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout,
                    num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i], dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlock2D(
                    in_channels=input_channel, out_channels=output_channel, dropout=self.dropout,
                    num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype,
                )
            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID",
                    kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel, kernel_size=(1, 1), padding="VALID",
                    kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(
            in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection, dtype=self.dtype,
        )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel, kernel_size=(1, 1), padding="VALID",
            kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype,
        )
    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale=1.0, return_dict=True, train=False):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)
        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)
        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
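# Minimal initialization sketch, NOT a pretrained model: the tiny config values
# below are illustrative assumptions chosen to keep the random init cheap, and
# shapes follow init_weights above.
def _controlnet_init_sketch():
    model = FlaxControlNetModel(
        sample_size=32,
        block_out_channels=(32, 64),
        down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
        attention_head_dim=8,
        cross_attention_dim=64,
    )
    params = model.init_weights(jax.random.PRNGKey(0))
    return model, params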
| 71
|
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))
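# Worked example: with scores [3, 5, 2, 9] and height log2(4) = 2, the maximizer
# picks the better of min(3, 5) = 3 and min(2, 9) = 2, so the optimal value is 3.
assert minimax(0, 0, True, [3, 5, 2, 9], math.log(4, 2)) == 3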
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 693
| 0
|
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name,
    optimizer,
    step_rules=None,
    num_warmup_steps=None,
    num_training_steps=None,
    num_cycles=1,
    power=1.0,
    last_epoch=-1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
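# Usage sketch: a linear warmup/decay schedule on a toy optimizer. The model and
# step counts are illustrative.
def _scheduler_sketch():
    import torch

    model = torch.nn.Linear(2, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = get_scheduler("linear", optimizer, num_warmup_steps=10, num_training_steps=100)
    for _ in range(5):
        optimizer.step()
        scheduler.step()
    return scheduler.get_last_lr()  # lr still ramping up during warmup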
| 249
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
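# Usage sketch: instantiating a small variant of the config for experimentation
# (the overridden sizes are illustrative, not the released checkpoint's values).
def _config_sketch():
    config = GPTNeoXJapaneseConfig(hidden_size=512, num_hidden_layers=4, num_attention_heads=8)
    return config.to_json_string()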
| 249
| 1
|
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level() -> int:
    """
    If the DATASETS_VERBOSITY env var is set to one of the valid choices, return that as the
    default level; otherwise fall back to `_default_log_level`.
    """
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: { ', '.join(log_levels.keys()) }"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
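# Usage sketch: the helpers above give per-library logging control.
def _logging_sketch():
    logger = get_logger(__name__)
    set_verbosity_debug()
    logger.debug("visible at DEBUG verbosity")
    set_verbosity_error()
    logger.warning("suppressed while the effective level is ERROR")
    return get_verbosity()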
| 336
|
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
        self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )

    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

        end = tokenizer.vocab_size
        language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
        self.assertListEqual(
            language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
        )

        code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
        input_ids = tokenizer(code).input_ids
        self.assertEqual(
            tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
            code,
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
        134,
        5452,
        33460,
        33441,
        33463,
        33465,
        33463,
        33449,
        988,
        20,
        33456,
        19,
        33456,
        771,
        39,
        4258,
        889,
        3318,
        33441,
        33463,
        33465,
        33463,
        33449,
        2471,
        2,
        PYTHON_CODE,
    ]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"], 50001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"], 50002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"], 50003)

    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(EN_CODE, self.tokenizer.all_special_ids)
        generated_ids = [EN_CODE, 9037, 33442, 57, 752, 153, 14, 56, 18, 9, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_english = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_english)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = ["def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])" * 20]
        self.assertIsInstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], PYTHON_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"]), [50004, 50001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = PLBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        self.assertEqual(batch.input_ids[1][-2:].tolist(), [2, PYTHON_CODE])
        self.assertEqual(batch.decoder_input_ids[1][0], EN_CODE)
        self.assertEqual(batch.decoder_input_ids[1][-1], 2)
        self.assertEqual(batch.labels[1][-2:].tolist(), [2, EN_CODE])

    @require_torch
    def test_python_en_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 26), batch.input_ids.shape)
        self.assertEqual((2, 26), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, PYTHON_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="java"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[150, 242, 2, 50003]],
                "attention_mask": [[1, 1, 1, 1]],
                # java
                "forced_bos_token_id": 50001,
            },
        )
| 336
| 1
|
"""simple docstring"""
import os
def solution() -> int:
    """Returns the greatest product of four adjacent numbers (horizontally,
    vertically, or diagonally) in the 20x20 grid stored in grid.txt."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
| 562
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {
"""configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""],
"""feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""],
"""processing_wav2vec2""": ["""Wav2Vec2Processor"""],
"""tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
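# Usage note (a sketch, not from the original file): with the lazy structure
# above, `import transformers.models.wav2vec2` stays cheap, and e.g.
#   from transformers.models.wav2vec2 import Wav2Vec2Model
# only pulls in the torch modeling module at that point (assuming torch is installed).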
| 562
| 1
|
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- | |- |--| |\ /| |-")
    print(r"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
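# Example (illustrative): pretty_print(3) draws a star diamond, roughly
#     *
#    * *
#   * * *
#   * * *
#    * *
#     *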
| 184
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    '''simple docstring'''

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text=None,
        text_pair=None,
        boxes=None,
        word_labels=None,
        add_special_tokens=True,
        padding=False,
        truncation=None,
        max_length=None,
        stride=0,
        pad_to_multiple_of=None,
        return_token_type_ids=None,
        return_attention_mask=None,
        return_overflowing_tokens=False,
        return_special_tokens_mask=False,
        return_offsets_mapping=False,
        return_length=False,
        verbose=True,
        return_tensors=None,
        **kwargs,
    ) -> BatchEncoding:
        '''simple docstring'''
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        '''simple docstring'''
        # in case of overflow, map each `input_ids` sample back to its image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        '''simple docstring'''
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
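# Usage sketch (illustrative, assuming the upstream checkpoint is available):
#   from transformers import LayoutXLMProcessor
#   processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#   encoding = processor(image, return_tensors="pt")  # OCR + tokenization in one call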
| 184
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524_288,
}
class ReformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        """simple docstring"""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        """simple docstring"""
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        """simple docstring"""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        """simple docstring"""
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        """simple docstring"""
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """simple docstring"""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """simple docstring"""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """simple docstring"""
        if index < self.sp_model.get_piece_size():
            return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """simple docstring"""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """simple docstring"""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
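# Usage sketch (illustrative; the class above is the upstream ReformerTokenizer):
#   tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   ids = tok("Crime and Punishment", return_tensors="pt").input_ids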
| 711
|
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]
def rename_keys(name: str) -> str:
    """simple docstring"""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name
def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """simple docstring"""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict
def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    """simple docstring"""
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config
@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    """simple docstring"""
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = TaEncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, device=args.device)
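# Example invocation (hypothetical script name and output path):
#   python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small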
| 163
| 0
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        """simple docstring"""
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """simple docstring"""
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """simple docstring"""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
| 80
|
'''simple docstring'''
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False
try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    def __init__(self, prompt: str = None, choices: list = []):
        '''simple docstring'''
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index, end: str = ""):
        '''simple docstring'''
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        '''simple docstring'''
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        '''simple docstring'''
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        '''simple docstring'''
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        '''simple docstring'''
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        '''simple docstring'''
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        '''simple docstring'''
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        '''simple docstring'''
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        '''simple docstring'''
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
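# Usage sketch (illustrative; upstream this is accelerate's BulletMenu):
#   choice = BulletMenu("Pick a backend:", ["cpu", "cuda", "tpu"]).run(default_choice=0)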
| 620
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-en-ro": (
            "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
        ),
        "facebook/mbart-large-cc25": (
            "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1_024,
    "facebook/mbart-large-cc25": 1_024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
# fmt: on
class MBartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ):
        '''simple docstring'''
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            tokenizer_file=tokenizer_file,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())

        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens]
            )

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        '''simple docstring'''
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self):
        '''simple docstring'''
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        '''simple docstring'''
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        '''simple docstring'''
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        '''simple docstring'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        '''simple docstring'''
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        '''simple docstring'''
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab(self):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        '''simple docstring'''
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        '''simple docstring'''
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        '''simple docstring'''
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        '''simple docstring'''
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        '''simple docstring'''
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        '''simple docstring'''
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
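# Usage sketch (illustrative):
#   tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tok("UN Chief says...", text_target="Seful ONU declara...", return_tensors="pt")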
| 719
|
"""simple docstring"""
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError("Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b
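# For a symmetric block matrix X = [[A, B], [B^T, C]] with A invertible, the
# Schur complement S = C - B^T A^{-1} B satisfies det(X) = det(A) * det(S),
# which is exactly the identity the first unit test below verifies.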
class TestSchurComplement(unittest.TestCase):
    def test_schur_complement(self):
        '''simple docstring'''
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)

        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)

        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self):
        '''simple docstring'''
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)

    def test_improper_b_c_dimensions(self):
        '''simple docstring'''
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])

        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 475
| 0
|
'''simple docstring'''
def permute(nums: list[int]) -> list[list[int]]:
    result = []
    if len(nums) == 1:
        return [nums.copy()]
    for _ in range(len(nums)):
        n = nums.pop(0)
        permutations = permute(nums)
        for perm in permutations:
            perm.append(n)
        result.extend(permutations)
        nums.append(n)
    return result


def permute2(nums):
    def backtrack(start):
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # backtrack

    output = []
    backtrack(0)
    return output


if __name__ == "__main__":
    import doctest

    # use res to print the data in permute2 function
    res = permute2([1, 2, 3])
    print(res)
    doctest.testmod()
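# Example: both functions return all 3! = 6 orderings of [1, 2, 3], though
# possibly in different orders, e.g.
# [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]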
| 69
|
'''simple docstring'''
INSTALL_CONTENT = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
# The Korean comments above read: "How to install Transformers" and "To install
# from source instead of the last release, comment the command above and
# uncomment the one below." (kept in Korean: this is the ko-locale docs config)
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 432
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}

if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 532
|
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    '''simple docstring'''
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    '''simple docstring'''
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    '''simple docstring'''
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    '''simple docstring'''
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    '''simple docstring'''
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    '''simple docstring'''
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
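# Note: linear_discriminant_analysis above solves the generalized eigenproblem
# S_b v = lambda * S_w v via scipy.linalg.eigh(S_b, S_w) and projects onto the
# leading eigenvectors, while principal_component_analysis diagonalizes the
# data covariance matrix directly.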
| 532
| 1
|
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        '''simple docstring'''
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
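# Usage sketch (illustrative; upstream this is diffusers' DanceDiffusionPipeline):
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   audio = pipe(num_inference_steps=50, audio_length_in_s=4.0).audios[0]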
| 515
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__(self, components=None) -> None:
        '''simple docstring'''
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        '''simple docstring'''
        return len(self.__components)

    def __str__(self) -> str:
        '''simple docstring'''
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other) -> Vector:
        '''simple docstring'''
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other) -> Vector:
        '''simple docstring'''
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        '''simple docstring'''
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        '''simple docstring'''
        ...

    def __mul__(self, other) -> float | Vector:
        '''simple docstring'''
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        '''simple docstring'''
        return Vector(self.__components)

    def component(self, i: int) -> float:
        '''simple docstring'''
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        '''simple docstring'''
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        '''simple docstring'''
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        '''simple docstring'''
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension: int) -> Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension: int, pos: int) -> Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)


class Matrix:
    def __init__(self, matrix, w: int, h: int) -> None:
        '''simple docstring'''
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        '''simple docstring'''
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other) -> Matrix:
        '''simple docstring'''
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other) -> Matrix:
        '''simple docstring'''
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        '''simple docstring'''
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        '''simple docstring'''
        ...

    def __mul__(self, other) -> Vector | Matrix:
        '''simple docstring'''
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        '''simple docstring'''
        return self.__height

    def width(self) -> int:
        '''simple docstring'''
        return self.__width

    def component(self, x: int, y: int) -> float:
        '''simple docstring'''
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        '''simple docstring'''
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        '''simple docstring'''
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        '''simple docstring'''
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        '''simple docstring'''
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n: int) -> Matrix:
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
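# Quick demo (illustrative):
#   v = Vector([1, 2, 3])
#   w = Vector([4, 5, 6])
#   print(v + w)            # (5,7,9)
#   print(v * w)            # 32, the dot product
#   m = Matrix([[1, 0], [0, 1]], 2, 2)
#   print(m.determinant())  # 1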
| 515
| 1
|
'''simple docstring'''
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class UpperCamelCase__ ( a ):
'''simple docstring'''
_snake_case = '''align_text_model'''
def __init__( self , SCREAMING_SNAKE_CASE=3_05_22 , SCREAMING_SNAKE_CASE=7_68 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=30_72 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=5_12 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.0_2 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> Optional[int]:
super().__init__(**SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = vocab_size
__lowerCAmelCase : Union[str, Any] = hidden_size
__lowerCAmelCase : Optional[Any] = num_hidden_layers
__lowerCAmelCase : Any = num_attention_heads
__lowerCAmelCase : Union[str, Any] = hidden_act
__lowerCAmelCase : Any = intermediate_size
__lowerCAmelCase : Optional[int] = hidden_dropout_prob
__lowerCAmelCase : int = attention_probs_dropout_prob
__lowerCAmelCase : Optional[Any] = max_position_embeddings
__lowerCAmelCase : Optional[int] = type_vocab_size
__lowerCAmelCase : List[str] = initializer_range
__lowerCAmelCase : List[str] = layer_norm_eps
__lowerCAmelCase : int = position_embedding_type
__lowerCAmelCase : int = use_cache
__lowerCAmelCase : List[str] = pad_token_id
@classmethod
def snake_case ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig":
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : int = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
__lowerCAmelCase : Union[str, Any] = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class UpperCamelCase__ ( a ):
'''simple docstring'''
_snake_case = '''align_vision_model'''
    def __init__(self, num_channels=3, image_size=600, width_coefficient=2.0, depth_coefficient=3.1, depth_divisor=8,
                 kernel_sizes=[3, 3, 5, 3, 5, 5, 3], in_channels=[32, 16, 24, 40, 80, 112, 192],
                 out_channels=[16, 24, 40, 80, 112, 192, 320], depthwise_padding=[], strides=[1, 2, 2, 2, 1, 2, 1],
                 num_block_repeats=[1, 2, 2, 3, 3, 4, 1], expand_ratios=[1, 6, 6, 6, 6, 6, 6],
                 squeeze_expansion_ratio=0.25, hidden_act="swish", hidden_dim=2560, pooling_type="mean",
                 initializer_range=0.02, batch_norm_eps=0.001, batch_norm_momentum=0.99, drop_connect_rate=0.2,
                 **kwargs) -> None:
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    """Composite configuration holding an AlignTextConfig and an AlignVisionConfig."""

    model_type = "align"
    is_composition = True
    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0,
                 initializer_range=0.02, **kwargs) -> None:
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")

        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
    @classmethod
    def from_text_vision_configs(cls, text_config: "AlignTextConfig", vision_config: "AlignVisionConfig", **kwargs):
        """Instantiate an AlignConfig from separate text and vision configurations."""
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
    def to_dict(self):
        """Serialize this instance to a Python dictionary, expanding the nested sub-configs."""
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
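
# A minimal usage sketch (assuming this module's import block, which sits above this
# excerpt, provides `PretrainedConfig`, `logger`, and `copy` as in transformers):
#
#     config = AlignConfig(projection_dim=640)
#     config.text_config.vocab_size    # 30522, from the AlignTextConfig defaults
#     config.vision_config.hidden_dim  # 2560, from the AlignVisionConfig defaults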
"""Abstract base class for `transformers-cli` subcommands."""
from abc import ABC, abstractmethod
from argparse import ArgumentParser


class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
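
# A hedged sketch of how a concrete subcommand would plug in (`FooCommand` is a
# hypothetical example, not part of this module):
#
#     class FooCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser):
#             sub = parser.add_parser("foo")
#             sub.set_defaults(func=lambda args: FooCommand())
#
#         def run(self):
#             print("running foo")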
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeq2SeqLM,
        TFAutoModelForSpeechSeq2Seq,
        TFAutoModelForVision2Seq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
    # tests whether the top_k_top_p_filtering function behaves as expected
    def test_top_k_top_p_filtering(self):
        logits = tf.convert_to_tensor(
            [
                [
                    8.2220991,  # 3rd highest value; idx. 0
                    -0.5620044,
                    5.23229752,
                    4.0386393,
                    -6.8798378,
                    -0.54785802,
                    -3.2012153,
                    2.92777176,
                    1.88171953,
                    7.35341276,  # 5th highest value; idx. 9
                    8.43207833,  # 2nd highest value; idx. 10
                    -9.85711836,
                    -5.96209236,
                    -1.13039161,
                    -7.1115294,
                    -0.8369633,
                    -5.3186408,
                    7.06427407,
                    0.81369344,
                    -0.82023817,
                    -5.9179796,
                    0.58813443,
                    -6.99778438,
                    4.71551189,
                    -0.18771637,
                    7.44020759,  # 4th highest value; idx. 25
                    9.38450987,  # 1st highest value; idx. 26
                    2.12662941,
                    -9.32562038,
                    2.35652522,
                ],  # cumulative prob of 5 highest values <= 0.6
                [
                    0.58425518,
                    4.53139238,
                    -5.57510464,
                    -6.28030699,
                    -7.19529503,
                    -4.02122551,
                    1.39337037,
                    -6.06707057,
                    1.59480517,
                    -9.643119,
                    0.03907799,
                    0.67231762,
                    -8.88206726,
                    6.27115922,  # 4th highest value; idx. 13
                    2.28520723,
                    4.82767506,
                    4.30421368,
                    8.8275313,  # 2nd highest value; idx. 17
                    5.44029958,  # 5th highest value; idx. 18
                    -4.4735794,
                    7.38579536,  # 3rd highest value; idx. 20
                    -2.91051663,
                    2.61946077,
                    -2.5674762,
                    -9.48959302,
                    -4.02922645,
                    -1.35416918,
                    9.67702323,  # 1st highest value; idx. 27
                    -5.89478553,
                    1.85370467,
                ],  # cumulative prob of 5 highest values <= 0.6
            ],
            dtype=tf.float32,
        )
        non_inf_expected_idx = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
            dtype=tf.int32,
        )  # expected non filtered idx as noted above
        non_inf_expected_output = tf.convert_to_tensor(
            [8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
            dtype=tf.float32,
        )  # expected non filtered values as noted above

        output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
        non_inf_output = output[output != -float("inf")]
        non_inf_idx = tf.cast(
            tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
            dtype=tf.int32,
        )

        tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
        tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
@require_tf
class TFGenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin):
    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        framework_dependent_parameters = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeq2Seq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeq2SeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVision2Seq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }
    @slow
    def test_generate_tf_function_export_fixed_input_length(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        input_length = 2
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length), tf.int32, name="input_ids"),
                    tf.TensorSpec((None, input_length), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2, 0], [102, 103]]
        dummy_attention_masks = [[1, 0], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]

            for batch_size in range(1, len(dummy_input_ids) + 1):
                inputs = {
                    "input_ids": tf.constant(dummy_input_ids[:batch_size]),
                    "attention_mask": tf.constant(dummy_attention_masks[:batch_size]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    def test_generate_tf_function_export_fixed_batch_size(self):
        # TF-only test: tf.saved_model export
        test_model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        batch_size = 1
        max_new_tokens = 2

        class DummyModel(tf.Module):
            def __init__(self, model):
                super(DummyModel, self).__init__()
                self.model = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None), tf.int32, name="input_ids"),
                    tf.TensorSpec((batch_size, None), tf.int32, name="attention_mask"),
                ),
                jit_compile=True,
            )
            def serving(self, input_ids, attention_mask):
                outputs = self.model.generate(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    max_new_tokens=max_new_tokens,
                    return_dict_in_generate=True,
                )
                return {"sequences": outputs["sequences"]}

        dummy_input_ids = [[2], [102, 103]]
        dummy_attention_masks = [[1], [1, 1]]
        dummy_model = DummyModel(model=test_model)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(dummy_model, tmp_dir, signatures={"serving_default": dummy_model.serving})
            serving_func = tf.saved_model.load(tmp_dir).signatures["serving_default"]

            for input_row in range(len(dummy_input_ids)):
                inputs = {
                    "input_ids": tf.constant([dummy_input_ids[input_row]]),
                    "attention_mask": tf.constant([dummy_attention_masks[input_row]]),
                }
                tf_func_outputs = serving_func(**inputs)["sequences"]
                tf_model_outputs = test_model.generate(**inputs, max_new_tokens=max_new_tokens)
                tf.debugging.assert_equal(tf_func_outputs, tf_model_outputs)
    @slow
    @require_tensorflow_text
    def test_generate_tf_function_export_with_tf_tokenizer(self):
        # TF-only test: tf.saved_model export
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id="google/flan-t5-small", filename="spiece.model", local_dir=tmp_dir)

            class CompleteSentenceTransformer(tf.keras.layers.Layer):
                def __init__(self):
                    super().__init__()
                    self.tokenizer = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(tmp_dir, "spiece.model"), "rb").read()
                    )
                    self.model = TFAutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")

                def call(self, inputs, *args, **kwargs):
                    tokens = self.tokenizer.tokenize(inputs)
                    input_ids, attention_mask = text.pad_model_inputs(
                        tokens, max_seq_length=64, pad_value=self.model.config.pad_token_id
                    )
                    outputs = self.model.generate(input_ids=input_ids, attention_mask=attention_mask)
                    return self.tokenizer.detokenize(outputs)

            complete_model = CompleteSentenceTransformer()
            inputs = tf.keras.layers.Input(shape=(1,), dtype=tf.string, name="inputs")
            outputs = complete_model(inputs)
            keras_model = tf.keras.Model(inputs, outputs)
            keras_model.save(tmp_dir)
    def test_eos_token_id_int_and_list_top_k_top_sampling(self):
        # Has PT equivalent: this test relies on random sampling
        generation_kwargs = {
            "do_sample": True,
            "num_beams": 1,
            "top_p": 0.7,
            "top_k": 10,
            "temperature": 0.7,
        }
        expectation = 14

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        text = "Hello, my dog is cute and"
        tokens = tokenizer(text, return_tensors="tf")
        model = TFAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

        eos_token_id = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))

        eos_token_id = [638, 198]
        with tf.device(":/CPU:0"):
            tf.random.set_seed(0)
            generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs)
        self.assertTrue(expectation == len(generated_tokens[0]))
    def test_model_kwarg_encoder_signature_filtering(self):
        # Has PT equivalent: ample use of framework-specific code
        bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart")
        article = "Hugging Face is a technology company based in New York and Paris."
        input_ids = bart_tokenizer(article, return_tensors="tf").input_ids
        bart_model = TFBartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart")
        output = bart_model.generate(input_ids).numpy()

        # A fake model whose call() accepts an extra "foo" argument; generate() should filter it out
        class FakeBart(TFBartForConditionalGeneration):
            def call(self, input_ids, foo=None, **kwargs):
                return super().call(input_ids, **kwargs)

        bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart")
        fake_output = bart_model.generate(input_ids, foo="bar").numpy()
        self.assertTrue(np.array_equal(output, fake_output))

        class FakeEncoder(bart_model.model.encoder.__class__):
            def call(self, input_ids, **kwargs):
                return super().call(input_ids, **kwargs)

        fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared)
        bart_model.model.encoder = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        fake_output = bart_model.generate(input_ids).numpy()
        with self.assertRaises(ValueError):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(input_ids, foo="bar")
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
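
# A hedged usage sketch (the encode -> forward -> decode chain above is driven by
# PipelineTool.__call__; "photo.jpg" is a placeholder path):
#
#     from PIL import Image
#
#     captioner = ImageCaptioningTool()
#     print(captioner(Image.open("photo.jpg")))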
"""Expose transformers pipelines over a FastAPI REST endpoint (`transformers-cli serve`)."""
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False
logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace):
    """Factory used to instantiate the serving server from command line arguments."""
    nlp = pipeline(
        task=args.task, model=args.model if args.model else None, config=args.config,
        tokenizer=args.tokenizer, device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)
class ServeModelInfoResult(BaseModel):
    """Expose model information."""
    infos: dict

class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""
    tokens: List[str]
    tokens_ids: Optional[List[int]]

class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""
    text: str

class ServeForwardResult(BaseModel):
    """Forward result model."""
    output: Any
class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command to argparse so it is available to the transformers CLI."""
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on",
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device", type=int, default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)
    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute("/", self.model_info, response_model=ServeModelInfoResult, response_class=JSONResponse, methods=["GET"]),
                    APIRoute("/tokenize", self.tokenize, response_model=ServeTokenizeResult, response_class=JSONResponse, methods=["POST"]),
                    APIRoute("/detokenize", self.detokenize, response_model=ServeDeTokenizeResult, response_class=JSONResponse, methods=["POST"]),
                    APIRoute("/forward", self.forward, response_model=ServeForwardResult, response_class=JSONResponse, methods=["POST"]),
                ],
                timeout=600,
            )
    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """Tokenize the provided input, optionally converting the tokens to their integer ids."""
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """Detokenize the provided token ids into readable text."""
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        """Run the pipeline on the provided input."""
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])
        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
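
# A hedged usage sketch (the routes mirror the APIRoute table above; the JSON field
# names follow the Body(..., embed=True) parameters, so treat the payloads as
# illustrative rather than authoritative):
#
#     transformers-cli serve --task sentiment-analysis --port 8888
#     curl http://localhost:8888/          # GET  -> model_info
#     curl -X POST http://localhost:8888/tokenize \
#          -H "Content-Type: application/json" \
#          -d '{"text_input": "Hello world", "return_ids": true}'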
"""Find the index of the first Fibonacci number to contain n digits (Project Euler 25)."""


def fibonacci(n: int) -> int:
    """Return the n-th Fibonacci number, with fibonacci(1) == 0 and fibonacci(2) == 1."""
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    """Return the index of the first Fibonacci number with at least n digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
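
# Worked example: fibonacci(12) == 144, the first Fibonacci number with three digits,
# so fibonacci_digits_index(3) == 12. The same scan with n=1000 answers the problem.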
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a band-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates an all-pass biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt
def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a peak biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a low-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    """Creates a high-shelf biquad filter."""
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
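
# A hedged usage sketch (IIRFilter.process comes from audio_filters.iir_filter in
# this repository; the coefficient formulas above follow the standard biquad
# "Audio EQ Cookbook" designs):
#
#     lp = make_lowpass(frequency=1000, samplerate=48000)
#     filtered = [lp.process(sample) for sample in samples]  # samples: iterable of floats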
"""Map an activation-function name to the corresponding torch.nn module."""
from torch import nn


def get_activation(act_fn: str) -> nn.Module:
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
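
# Example: get_activation("swish") and get_activation("silu") both return nn.SiLU(),
# since both names refer to the same x * sigmoid(x) nonlinearity.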
"""Minimum-cost path through a grid, moving only right or down."""


def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
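
# Worked example: min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7, reached by
# the monotone (right/down) path 1 -> 3 -> 1 -> 1 -> 1.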
"""Deprecated alias kept for backward compatibility: DeiTFeatureExtractor -> DeiTImageProcessor."""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)


class DeiTFeatureExtractor(DeiTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""ViT model configuration."""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vit-base-patch16-224": "https://huggingface.co/vit-base-patch16-224/resolve/main/config.json",
    # See all ViT models at https://huggingface.co/models?filter=vit
}


class ViTConfig(PretrainedConfig):
    model_type = "vit"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
                 hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
                 initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3,
                 qkv_bias=True, encoder_stride=16, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
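
# A quick sanity check on the defaults above: a 224x224 image cut into 16x16 patches
# gives (224 // 16) ** 2 = 196 patch tokens, plus one [CLS] token, for a sequence
# length of 197.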
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    """Class to contain the entire pipeline for the SHA-1 hashing algorithm."""

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        """Left-rotates the 32-bit integer n by b bits."""
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        """Pads the input message so its length is a multiple of 64 bytes (512 bits)."""
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        """Returns a list of bytestrings, each of length 64."""
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        """Unpacks a 64-byte block into 16 integers and expands them to 80 via bit operations."""
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w
    def final_hash(self):
        """Pads the data, splits it into blocks, and runs the 80-round compression per block."""
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
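
# Quick check against the reference implementation: SHA1Hash(b"abc").final_hash()
# matches hashlib.sha1(b"abc").hexdigest(), i.e.
# "a9993e364706816aba3e25717850c26c9cd0d89d".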
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True,
                 use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False,
                 causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32,
                 num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2,
                 initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True,
                 scope=None, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return XLMConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal,
            n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj,
            num_labels=self.num_labels, bos_token_id=self.bos_token_id,
        )
    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
                                   token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
                                     token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
                                       token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
                                token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths,
                                              sequence_labels, token_labels, is_impossible_labels, choice_labels,
                                              input_mask):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
                                           token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths,
                                                 sequence_labels, token_labels, is_impossible_labels, choice_labels,
                                                 input_mask):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
         is_impossible_labels, choice_labels, input_mask) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": XLMModel,
            "fill-mask": XLMWithLMHeadModel,
            "question-answering": XLMForQuestionAnsweringSimple,
            "text-classification": XLMForSequenceClassification,
            "text-generation": XLMWithLMHeadModel,
            "token-classification": XLMForTokenClassification,
            "zero-shot": XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)

    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions)
        )
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)

        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions],
                [expected_shape] * len(iter_attentions),
            )

    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states],
            [True] * len(hidden_states),
        )
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)

        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states],
                [expected_shape] * len(iter_hidden_states),
            )
    @slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xlm_mlm_en_2048(self):
        model = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048")
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
            14, 447, 14, 447, 14, 447, 14, 447, 14, 447,
        ]  # "the president" repeated ten times
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
import os
from math import log10


def solution(data_file: str = "base_exp.txt") -> int:
    """Find the 1-indexed line of the base,exponent file whose power base**exp is largest."""
    largest = 0
    result = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__), data_file))):
        a, x = list(map(int, line.split(",")))
        if x * log10(a) > largest:
            largest = x * log10(a)
            result = i + 1
    return result


if __name__ == "__main__":
    print(solution())
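
# Why the log trick works: log10 is monotonic, so comparing x * log10(a) ranks a**x
# without materialising numbers with hundreds of thousands of digits. For example,
# 11 * log10(2) ~= 3.31 < 7 * log10(3) ~= 3.34, so 3**7 (= 2187) beats 2**11 (= 2048).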
"""Backward-compatibility re-exports: everything from file_utils now lives in transformers.utils."""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
    is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
    is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
    is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
    is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
    is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
    is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    # The target keys for the split q/k/v projections follow the AST model layout in transformers;
    # the original checkpoint stores them fused as a single "qkv" tensor.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
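
# Illustrative example of the renaming logic above (the input key follows the
# original AST checkpoint layout):
#     rename_key("module.v.blocks.0.attn.proj.weight")
#     -> "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight"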
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original AST weights into the 🤗 Transformers structure."""
    config = get_audio_spectrogram_transformer_config(model_name)
    model_name_to_url = {
"ast-finetuned-audioset-10-10-0.4593": (
"https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.450": (
"https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448": (
"https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448-v2": (
"https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
),
"ast-finetuned-audioset-12-12-0.447": (
"https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
),
"ast-finetuned-audioset-14-14-0.443": (
"https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
),
"ast-finetuned-audioset-16-16-0.442": (
"https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
),
"ast-finetuned-speech-commands-v2": (
"https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
),
}
# load original state_dict
snake_case_ :Optional[int] = model_name_to_url[model_name]
snake_case_ :str = torch.hub.load_state_dict_from_url(_A, map_location="cpu" )
# remove some keys
remove_keys(_A )
# rename some keys
snake_case_ :int = convert_state_dict(_A, _A )
# load 🤗 model
snake_case_ :str = ASTForAudioClassification(_A )
model.eval()
model.load_state_dict(_A )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
snake_case_ :int = -4.2_677_393 if "speech-commands" not in model_name else -6.845_978
snake_case_ :Union[str, Any] = 4.5_689_974 if "speech-commands" not in model_name else 5.5_654_526
snake_case_ :Union[str, Any] = 1_024 if "speech-commands" not in model_name else 128
snake_case_ :Optional[int] = ASTFeatureExtractor(mean=_A, std=_A, max_length=_A )
if "speech-commands" in model_name:
snake_case_ :Optional[int] = load_dataset("speech_commands", "v0.02", split="validation" )
snake_case_ :int = dataset[0]["audio"]["array"]
else:
snake_case_ :Any = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset", )
snake_case_ , snake_case_ :List[Any] = torchaudio.load(_A )
snake_case_ :Optional[int] = waveform.squeeze().numpy()
snake_case_ :Any = feature_extractor(_A, sampling_rate=16_000, return_tensors="pt" )
# forward pass
snake_case_ :Dict = model(**_A )
snake_case_ :List[str] = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
snake_case_ :Optional[int] = torch.tensor([-0.8_760, -7.0_042, -8.6_602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
snake_case_ :Any = torch.tensor([-1.1_986, -7.0_903, -8.2_718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
snake_case_ :List[Any] = torch.tensor([-2.6_128, -8.0_080, -9.4_344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
snake_case_ :Dict = torch.tensor([-1.5_080, -7.4_534, -8.8_917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
snake_case_ :int = torch.tensor([-0.5_050, -6.5_833, -8.0_843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
snake_case_ :Dict = torch.tensor([-0.3_826, -7.0_336, -8.2_413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
snake_case_ :List[str] = torch.tensor([-1.2_113, -6.9_101, -8.3_470] )
elif model_name == "ast-finetuned-speech-commands-v2":
snake_case_ :Optional[Any] = torch.tensor([6.1_589, -8.0_566, -8.7_984] )
else:
raise ValueError("Unknown model name" )
if not torch.allclose(logits[0, :3], _A, atol=1e-4 ):
raise ValueError("Logits don't match" )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(_A ).mkdir(exist_ok=_A )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(_A )
print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(_A )
if push_to_hub:
print("Pushing model and feature extractor to the hub..." )
model.push_to_hub(F'''MIT/{model_name}''' )
feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
__UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__UpperCAmelCase : Optional[Any] = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 584
| 0
|
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
        type_sequence_label_size=2, initializer_range=0.02, num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size, dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, hidden_dim=self.intermediate_size, hidden_act=self.hidden_act, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, tie_weights_=True,
        )

        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 709
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Return the maximum sum obtainable from non-adjacent elements of ``nums``.

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    """
    if not nums:
        return 0
    max_including = nums[0]  # best sum that includes the current element
    max_excluding = 0  # best sum that excludes the current element
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 284
| 0
|
'''simple docstring'''
# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
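
    # Note (added): in get_dummy_inputs above, `control_image` stays a 64x64 tensor,
    # which appears to match the 32x32 UNet sample size times the VAE scale factor of 2,
    # while `image` is the PIL input consumed by the img2img branch.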
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs
    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3
    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6,
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )

        assert np.abs(expected_image - image).max() < 9e-2
| 22
|
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS

    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
    @property
    def dummy_uncond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test", subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        unet = UNet2DModel.from_pretrained(
            "diffusers/consistency-models-test", subfolder="test_unet_class_cond",
        )
        return unet
    def get_dummy_components(self, class_cond=False):
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs
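
    # Note (added): "num_inference_steps": None together with "timesteps": [22, 0]
    # drives the multistep consistency sampler; the one-step tests below override
    # these with num_inference_steps=1 and timesteps=None.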
    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    # dtype default assumed to be float32; the damaged source is ambiguous here
    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)):
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents
    def test_consistency_model_cd_multistep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_consistency_model_cd_onestep(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    @require_torch_2
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    @require_torch_2
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 669
| 0
|
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Convert the original Bort checkpoint (obtained with the official Gluonnlp repository) to our BERT structure.
    """
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    # booleans/None below restored per the official conversion script
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"], num_layers=predefined_args["num_layers"], units=predefined_args["units"], hidden_size=predefined_args["hidden_size"], max_length=predefined_args["max_length"], num_heads=predefined_args["num_heads"], scaled=predefined_args["scaled"], dropout=predefined_args["dropout"], output_attention=False, output_all_encodings=False, use_residual=predefined_args["use_residual"], activation=predefined_args.get("activation", "gelu"), layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder, len(bort_vocab), units=predefined_args["units"], embed_size=predefined_args["embed_size"], embed_dropout=predefined_args["embed_dropout"], word_embed=predefined_args["word_embed"], use_pooler=False, use_token_type_embed=False, token_type_vocab_size=predefined_args["token_type_vocab_size"], use_classifier=False, use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()
    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param
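
    # Illustrative example (added): for layer 0, check_and_map_params(self_attn.key.weight.data,
    # "encoder.transformer_cells.0.attention_cell.proj_key.weight") verifies that the shapes
    # match and returns the Gluon tensor, which is then assigned to the corresponding
    # bert.encoder.layer.0.attention.self.key.weight (see the mapping table above).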
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 468
|
"""simple docstring"""
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
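
# Illustrative example (added, values assumed): with pad_token_id=1, input_ids of
# [[5, 7, 1]] produces attention_mask [[1, 1, 0]] via the np.where call above.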
class FlaxBlenderbotModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99,
        hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32,
        eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        # use_cache=True assumed, since the cache-forward checks below rely on it
        config = BlenderbotConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=True,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
def _snake_case ( self : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
__magic_name__ = 2_0
__magic_name__ = model_class_name(__lowerCamelCase )
__magic_name__ = model.encode(inputs_dict["input_ids"] )
__magic_name__ , __magic_name__ = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
__magic_name__ = model.init_cache(decoder_input_ids.shape[0] , __lowerCamelCase , __lowerCamelCase )
__magic_name__ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
__magic_name__ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__magic_name__ = model.decode(
decoder_input_ids[:, :-1] , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , decoder_position_ids=__lowerCamelCase , )
__magic_name__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
__magic_name__ = model.decode(
decoder_input_ids[:, -1:] , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__lowerCamelCase , )
__magic_name__ = model.decode(__lowerCamelCase , __lowerCamelCase )
__magic_name__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def _snake_case ( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] , __lowerCamelCase : Tuple ) -> int:
__magic_name__ = 2_0
__magic_name__ = model_class_name(__lowerCamelCase )
__magic_name__ = model.encode(inputs_dict["input_ids"] )
__magic_name__ , __magic_name__ = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
__magic_name__ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__magic_name__ = model.init_cache(decoder_input_ids.shape[0] , __lowerCamelCase , __lowerCamelCase )
__magic_name__ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__magic_name__ = model.decode(
decoder_input_ids[:, :-1] , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase , past_key_values=__lowerCamelCase , decoder_position_ids=__lowerCamelCase , )
__magic_name__ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
__magic_name__ = model.decode(
decoder_input_ids[:, -1:] , __lowerCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__lowerCamelCase , decoder_position_ids=__lowerCamelCase , )
__magic_name__ = model.decode(__lowerCamelCase , __lowerCamelCase , decoder_attention_mask=__lowerCamelCase )
__magic_name__ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class A_ ( unittest.TestCase ):
UpperCAmelCase__ = 9_9
def _snake_case ( self : Dict ) -> Dict:
__magic_name__ = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
__magic_name__ = input_ids.shape[0]
__magic_name__ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def _snake_case ( self : Optional[Any] ) -> Optional[int]:
__magic_name__ , __magic_name__ , __magic_name__ = self._get_config_and_data()
__magic_name__ = FlaxBlenderbotForConditionalGeneration(__lowerCamelCase )
__magic_name__ = lm_model(input_ids=__lowerCamelCase )
__magic_name__ = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs["logits"].shape , __lowerCamelCase )
def _snake_case ( self : List[Any] ) -> Optional[Any]:
__magic_name__ = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
__magic_name__ = FlaxBlenderbotForConditionalGeneration(__lowerCamelCase )
__magic_name__ = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa )
__magic_name__ = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa )
__magic_name__ = lm_model(input_ids=__lowerCamelCase , decoder_input_ids=__lowerCamelCase )
__magic_name__ = (*summary.shape, config.vocab_size)
self.assertEqual(outputs["logits"].shape , __lowerCamelCase )
def _snake_case ( self : Union[str, Any] ) -> List[Any]:
__magic_name__ = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa )
__magic_name__ = shift_tokens_right(__lowerCamelCase , 1 , 2 )
__magic_name__ = np.equal(__lowerCamelCase , 1 ).astype(np.floataa ).sum()
__magic_name__ = np.equal(__lowerCamelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(__lowerCamelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class A_ ( snake_case_ , unittest.TestCase , snake_case_ ):
UpperCAmelCase__ = True
UpperCAmelCase__ = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
UpperCAmelCase__ = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def _snake_case ( self : List[Any] ) -> Any:
__magic_name__ = FlaxBlenderbotModelTester(self )
def test_use_cache_forward(self):
    config, inputs = self.model_tester.prepare_config_and_inputs()
    for model_class in self.all_model_classes:
        self.model_tester.check_use_cache_forward(model_class, config, inputs)

def test_use_cache_forward_with_attn_mask(self):
    config, inputs = self.model_tester.prepare_config_and_inputs()
    for model_class in self.all_model_classes:
        self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs)

def test_encode(self):
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        with self.subTest(model_class.__name__):
            prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)

            @jax.jit
            def encode_jitted(input_ids, attention_mask=None, **kwargs):
                return model.encode(input_ids=input_ids, attention_mask=attention_mask)

            with self.subTest("JIT Enabled"):
                jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

            with self.subTest("JIT Disabled"):
                with jax.disable_jit():
                    outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

            self.assertEqual(len(outputs), len(jitted_outputs))
            for jitted_output, output in zip(jitted_outputs, outputs):
                self.assertEqual(jitted_output.shape, output.shape)

def test_decode(self):
    config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
    for model_class in self.all_model_classes:
        with self.subTest(model_class.__name__):
            model = model_class(config)
            encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

            prepared_inputs_dict = {
                "decoder_input_ids": inputs_dict["decoder_input_ids"],
                "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                "encoder_outputs": encoder_outputs,
            }

            @jax.jit
            def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                return model.decode(
                    decoder_input_ids=decoder_input_ids,
                    decoder_attention_mask=decoder_attention_mask,
                    encoder_outputs=encoder_outputs,
                )

            with self.subTest("JIT Enabled"):
                jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

            with self.subTest("JIT Disabled"):
                with jax.disable_jit():
                    outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

            self.assertEqual(len(outputs), len(jitted_outputs))
            for jitted_output, output in zip(jitted_outputs, outputs):
                self.assertEqual(jitted_output.shape, output.shape)

@slow
def test_model_from_pretrained(self):
    for model_class_name in self.all_model_classes:
        model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
        # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
        input_ids = np.ones((1, 1)) * model.config.eos_token_id
        outputs = model(input_ids)
        self.assertIsNotNone(outputs)

@unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
@slow
def test_generation_from_short_input_same_as_parlai_3B(self):
    FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
    TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

    model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
    tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

    src_text = ["Sam"]
    model_inputs = tokenizer(src_text, return_tensors="jax")

    generated_utterances = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
    tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

    generated_txt = tokenizer.batch_decode(generated_utterances.sequences, **TOK_DECODE_KW)
    assert generated_txt[0].strip() == tgt_text
import pickle
import unittest

import torch

from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu


@require_cpu
class OptimizerTester(unittest.TestCase):
    def test_accelerated_optimizer_pickling(self):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        AcceleratorState._reset_state()
"""simple docstring"""
def __a ( A ) -> List[str]:
'''simple docstring'''
A__ = [0] * len(A )
A__ = []
A__ = []
A__ = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(A ) ):
if indegree[i] == 0:
queue.append(A )
while queue:
A__ = queue.pop(0 )
cnt += 1
topo.append(A )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(A )
if cnt != len(A ):
print("Cycle exists" )
else:
print(A )
# Adjacency List of Graph
__UpperCAmelCase ={0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
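
# Illustrative usage (not part of the original sample): the same routine on a
# graph that contains a cycle, to exercise the "Cycle exists" branch. The graph
# below is an assumption chosen purely for demonstration.
cyclic_graph = {0: [1], 1: [2], 2: [0]}
topological_sort(cyclic_graph)  # prints "Cycle exists"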
class Graph:
    def __init__(self):
        self.vertex = {}

    def print_graph(self):
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        # append the neighbour if the vertex is already known
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        # mark the current vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # recur for all the vertices that have not been visited yet
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    #   0 -> 1 -> 2
    #   1 -> 2
    #   2 -> 0 -> 3
    #   3 -> 3
    #  DFS:
    #   0 1 2 3
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __magic_name__ ( self : Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
return image
@property
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
        _lowerCAmelCase : Dict = UNet2DConditionModel(
block_out_channels=(3_2, 3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , attention_head_dim=8 , use_linear_projection=A_ , only_cross_attention=(True, True, False) , num_class_embeds=1_0_0 , )
return model
@property
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
_lowerCAmelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , )
return CLIPTextModel(A_ )
def __magic_name__ ( self : str ):
'''simple docstring'''
_lowerCAmelCase : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : Tuple = self.dummy_cond_unet_upscale
_lowerCAmelCase : Optional[int] = DDPMScheduler()
_lowerCAmelCase : Optional[Any] = DDIMScheduler(prediction_type="v_prediction" )
_lowerCAmelCase : str = self.dummy_vae
_lowerCAmelCase : Any = self.dummy_text_encoder
_lowerCAmelCase : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_lowerCAmelCase : List[Any] = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _lowerCAmelCase : List[Any] = Image.fromarray(np.uint8(A_ ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_lowerCAmelCase : List[Any] = StableDiffusionUpscalePipeline(
unet=A_ , low_res_scheduler=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , max_noise_level=3_5_0 , )
_lowerCAmelCase : Optional[int] = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
_lowerCAmelCase : List[str] = "A painting of a squirrel eating a burger"
_lowerCAmelCase : Tuple = torch.Generator(device=A_ ).manual_seed(0 )
_lowerCAmelCase : Optional[int] = sd_pipe(
[prompt] , image=A_ , generator=A_ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
_lowerCAmelCase : Tuple = output.images
_lowerCAmelCase : Optional[int] = torch.Generator(device=A_ ).manual_seed(0 )
_lowerCAmelCase : Union[str, Any] = sd_pipe(
[prompt] , image=A_ , generator=A_ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , return_dict=A_ , )[0]
_lowerCAmelCase : List[Any] = image[0, -3:, -3:, -1]
_lowerCAmelCase : List[str] = image_from_tuple[0, -3:, -3:, -1]
_lowerCAmelCase : Optional[int] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_lowerCAmelCase : str = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __magic_name__ ( self : int ):
'''simple docstring'''
_lowerCAmelCase : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
_lowerCAmelCase : List[str] = self.dummy_cond_unet_upscale
_lowerCAmelCase : str = DDPMScheduler()
_lowerCAmelCase : Union[str, Any] = DDIMScheduler(prediction_type="v_prediction" )
_lowerCAmelCase : Tuple = self.dummy_vae
_lowerCAmelCase : Union[str, Any] = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_lowerCAmelCase : Dict = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _lowerCAmelCase : Tuple = Image.fromarray(np.uint8(A_ ) ).convert("RGB" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
_lowerCAmelCase : Optional[Any] = StableDiffusionUpscalePipeline(
unet=A_ , low_res_scheduler=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , max_noise_level=3_5_0 , )
_lowerCAmelCase : Dict = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
_lowerCAmelCase : Union[str, Any] = "A painting of a squirrel eating a burger"
_lowerCAmelCase : Tuple = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
_lowerCAmelCase : List[str] = output.images
assert image.shape[0] == 2
_lowerCAmelCase : str = torch.Generator(device=A_ ).manual_seed(0 )
_lowerCAmelCase : List[Any] = sd_pipe(
[prompt] , image=A_ , generator=A_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
_lowerCAmelCase : int = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
_lowerCAmelCase : Any = self.dummy_cond_unet_upscale
_lowerCAmelCase : List[str] = DDPMScheduler()
_lowerCAmelCase : Optional[int] = DDIMScheduler(prediction_type="v_prediction" )
_lowerCAmelCase : Optional[Any] = self.dummy_vae
_lowerCAmelCase : List[str] = self.dummy_text_encoder
_lowerCAmelCase : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_lowerCAmelCase : Any = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _lowerCAmelCase : Any = Image.fromarray(np.uint8(A_ ) ).convert("RGB" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
_lowerCAmelCase : Tuple = unet.half()
_lowerCAmelCase : Optional[Any] = text_encoder.half()
# make sure here that pndm scheduler skips prk
_lowerCAmelCase : List[str] = StableDiffusionUpscalePipeline(
unet=A_ , low_res_scheduler=A_ , scheduler=A_ , vae=A_ , text_encoder=A_ , tokenizer=A_ , max_noise_level=3_5_0 , )
_lowerCAmelCase : Optional[Any] = sd_pipe.to(A_ )
sd_pipe.set_progress_bar_config(disable=A_ )
_lowerCAmelCase : str = "A painting of a squirrel eating a burger"
_lowerCAmelCase : Any = torch.manual_seed(0 )
_lowerCAmelCase : Optional[Any] = sd_pipe(
[prompt] , image=A_ , generator=A_ , num_inference_steps=2 , output_type="np" , ).images
_lowerCAmelCase : List[str] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
"""simple docstring"""
def __magic_name__ ( self : Any ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_lowerCAmelCase : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
_lowerCAmelCase : Union[str, Any] = "stabilityai/stable-diffusion-x4-upscaler"
_lowerCAmelCase : Optional[int] = StableDiffusionUpscalePipeline.from_pretrained(A_ )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
_lowerCAmelCase : str = "a cat sitting on a park bench"
_lowerCAmelCase : str = torch.manual_seed(0 )
_lowerCAmelCase : Optional[int] = pipe(
prompt=A_ , image=A_ , generator=A_ , output_type="np" , )
_lowerCAmelCase : int = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def __magic_name__ ( self : Tuple ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_lowerCAmelCase : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
_lowerCAmelCase : Union[str, Any] = "stabilityai/stable-diffusion-x4-upscaler"
_lowerCAmelCase : Any = StableDiffusionUpscalePipeline.from_pretrained(
            A_ , torch_dtype=torch.float16 , )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing()
_lowerCAmelCase : Optional[Any] = "a cat sitting on a park bench"
_lowerCAmelCase : Any = torch.manual_seed(0 )
_lowerCAmelCase : int = pipe(
prompt=A_ , image=A_ , generator=A_ , output_type="np" , )
_lowerCAmelCase : Union[str, Any] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowerCAmelCase : int = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_lowerCAmelCase : Tuple = "stabilityai/stable-diffusion-x4-upscaler"
_lowerCAmelCase : Any = StableDiffusionUpscalePipeline.from_pretrained(
            A_ , torch_dtype=torch.float16 , )
pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowerCAmelCase : List[Any] = "a cat sitting on a park bench"
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = pipe(
prompt=A_ , image=A_ , generator=A_ , num_inference_steps=5 , output_type="np" , )
_lowerCAmelCase : Optional[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 1_0**9
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    """Markov chain over a transition graph: each node stores the probability
    of moving to each of its neighbours."""

    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> Counter:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
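
# Illustrative usage (not part of the original sample): a two-state chain whose
# transition probabilities are assumptions chosen for demonstration. Counts how
# often each state is visited over 1000 random steps starting from "a".
if __name__ == "__main__":
    example_transitions = [
        ("a", "a", 0.9),
        ("a", "b", 0.1),
        ("b", "a", 0.5),
        ("b", "b", 0.5),
    ]
    print(get_transitions("a", example_transitions, 1000))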
import qiskit


def half_adder(bit0: int, bit1: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")

    qc_ha = qiskit.QuantumCircuit(4, 2)
    # encode inputs in qubits 0 and 1
    if bit0 == 1:
        qc_ha.x(0)
    if bit1 == 1:
        qc_ha.x(1)
    qc_ha.barrier()

    # use cnots to write XOR of the inputs on qubit2
    qc_ha.cx(0, 2)
    qc_ha.cx(1, 2)

    # use ccx / toffoli gate to write AND of the inputs on qubit3
    qc_ha.ccx(0, 1, 3)
    qc_ha.barrier()

    # extract outputs
    qc_ha.measure(2, 0)  # extract XOR value
    qc_ha.measure(3, 1)  # extract AND value

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(qc_ha, simulator, shots=1000)

    # Return the histogram data of the results of the experiment
    return job.result().get_counts(qc_ha)


if __name__ == "__main__":
    counts = half_adder(1, 1)
    print(f"Half Adder Output Qubit Counts: {counts}")
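
# Illustrative check (not part of the original sample): for classical inputs the
# dominant measured bitstring should be "<carry><sum>", i.e. AND then XOR of the
# inputs, since classical bit 1 holds the carry and bit 0 the sum. This assumes
# a working qiskit-aer simulator install.
if __name__ == "__main__":
    for b0 in (0, 1):
        for b1 in (0, 1):
            result = half_adder(b0, b1)
            expected = f"{b0 & b1}{b0 ^ b1}"
            assert max(result, key=result.get) == expected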
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
_UpperCAmelCase : Dict = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class lowercase ( lowercase_ ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''gptsan-japanese'''
__SCREAMING_SNAKE_CASE : Optional[Any] = [
'''past_key_values''',
]
__SCREAMING_SNAKE_CASE : List[str] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , snake_case=3_6000 , snake_case=1280 , snake_case=1024 , snake_case=8192 , snake_case=4096 , snake_case=128 , snake_case=10 , snake_case=0 , snake_case=16 , snake_case=16 , snake_case=128 , snake_case=0.0 , snake_case=1e-5 , snake_case=False , snake_case=0.0 , snake_case="float32" , snake_case=False , snake_case=False , snake_case=False , snake_case=0.0_02 , snake_case=False , snake_case=True , snake_case=3_5998 , snake_case=3_5995 , snake_case=3_5999 , **snake_case , ):
snake_case_ = vocab_size
snake_case_ = max_position_embeddings
snake_case_ = d_model
snake_case_ = d_ff
snake_case_ = d_ext
snake_case_ = d_spout
snake_case_ = num_switch_layers
snake_case_ = num_ext_layers
snake_case_ = num_switch_layers + num_ext_layers
snake_case_ = num_heads
snake_case_ = num_experts
snake_case_ = expert_capacity
snake_case_ = dropout_rate
snake_case_ = layer_norm_epsilon
snake_case_ = router_bias
snake_case_ = router_jitter_noise
snake_case_ = router_dtype
snake_case_ = router_ignore_padding_tokens
snake_case_ = output_hidden_states
snake_case_ = output_attentions
snake_case_ = initializer_factor
snake_case_ = output_router_logits
snake_case_ = use_cache
super().__init__(
separator_token_id=snake_case , pad_token_id=snake_case , eos_token_id=snake_case , **snake_case , )
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_UpperCAmelCase : Dict = logging.get_logger(__name__)
class lowercase ( lowercase_ ):
__SCREAMING_SNAKE_CASE : Any = '''upernet'''
def __init__( self , snake_case=None , snake_case=512 , snake_case=0.02 , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=384 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ):
super().__init__(**snake_case )
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
snake_case_ = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'] )
elif isinstance(snake_case , snake_case ):
snake_case_ = backbone_config.get('model_type' )
snake_case_ = CONFIG_MAPPING[backbone_model_type]
snake_case_ = config_class.from_dict(snake_case )
snake_case_ = backbone_config
snake_case_ = hidden_size
snake_case_ = initializer_range
snake_case_ = pool_scales
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = auxiliary_in_channels
snake_case_ = auxiliary_channels
snake_case_ = auxiliary_num_convs
snake_case_ = auxiliary_concat_input
snake_case_ = loss_ignore_index
def a ( self ):
snake_case_ = copy.deepcopy(self.__dict__ )
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
return output
"""simple docstring"""
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
SCREAMING_SNAKE_CASE__:List[Any] = logging.get_logger(__name__)
class snake_case__ :
_snake_case : Optional[int] = None
@experimental
def _lowerCamelCase( a , a , a , a , a , a , a ):
if ParallelBackendConfig.backend_name is None:
return _map_with_multiprocessing_pool(
a , a , a , a , a , a , a )
return _map_with_joblib(a , a , a , a , a , a , a )
def _lowerCamelCase( a , a , a , a , a , a , a ):
__a = num_proc if num_proc <= len(a ) else len(a )
__a = [] # We organize the splits ourselve (contiguous splits)
for index in range(a ):
__a = len(a ) // num_proc
__a = len(a ) % num_proc
__a = div * index + min(a , a )
__a = start + div + (1 if index < mod else 0)
split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) )
if len(a ) != sum(len(i[1] ) for i in split_kwds ):
raise ValueError(
F"Error dividing inputs iterable among processes. "
F"Total number of objects {len(a )}, "
F"length: {sum(len(i[1] ) for i in split_kwds )}" )
logger.info(
F"Spawning {num_proc} processes for {len(a )} objects in slices of {[len(i[1] ) for i in split_kwds]}" )
__a , __a = None, None
if not disable_tqdm:
__a , __a = (RLock(),), tqdm.set_lock
with Pool(a , initargs=a , initializer=a ) as pool:
__a = pool.map(a , a )
logger.info(F"Finished {num_proc} processes" )
__a = [obj for proc_res in mapped for obj in proc_res]
logger.info(F"Unpacked {len(a )} objects" )
return mapped
def _lowerCamelCase( a , a , a , a , a , a , a ):
# progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
# and it requires monkey-patching joblib internal classes which is subject to change
import joblib
with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=a ):
return joblib.Parallel()(
joblib.delayed(a )((function, obj, types, None, True, None) ) for obj in iterable )
@experimental
@contextlib.contextmanager
def _lowerCamelCase( a ):
__a = backend_name
if backend_name == "spark":
from joblibspark import register_spark
register_spark()
# TODO: call create_cache_and_write_probe if "download" in steps
# TODO: raise NotImplementedError when Dataset.map etc is called
try:
yield
finally:
__a = None
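
# Illustrative usage (not part of the original sample). It assumes this module
# is importable inside `datasets` (note the relative `..utils` import above) and
# that `single_map_nested_func` accepts a (function, slice, types, rank,
# disable_tqdm, desc) tuple, which is the contract the pool.map call implies.
def _square(x):
    return x * x


def _run_slice(args):
    function, data, _types, _rank, _disable_tqdm, _desc = args
    return [function(x) for x in data]


if __name__ == "__main__":
    # contiguous slices are mapped in 4 worker processes, then flattened in order
    print(parallel_map(_square, list(range(8)), 4, None, True, None, _run_slice))
    # -> [0, 1, 4, 9, 16, 25, 36, 49]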
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__:Optional[Any] = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:Optional[Any] = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:List[str] = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__:int = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__:int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def snake_case ( A__ ,A__ ,A__ = 1 ,A__ = 1 ,A__ = 1.0e4 ,A__ = False ,A__ = 1.0 ,):
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even"""
UpperCAmelCase_ : Any = float(embedding_dim // 2 )
UpperCAmelCase_ : Optional[int] = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
UpperCAmelCase_ : Any = min_timescale * jnp.exp(jnp.arange(A__ ,dtype=jnp.floataa ) * -log_timescale_increment )
UpperCAmelCase_ : Dict = jnp.expand_dims(A__ ,1 ) * jnp.expand_dims(A__ ,0 )
# scale embeddings
UpperCAmelCase_ : str = scale * emb
if flip_sin_to_cos:
UpperCAmelCase_ : str = jnp.concatenate([jnp.cos(A__ ), jnp.sin(A__ )] ,axis=1 )
else:
UpperCAmelCase_ : Union[str, Any] = jnp.concatenate([jnp.sin(A__ ), jnp.cos(A__ )] ,axis=1 )
UpperCAmelCase_ : Union[str, Any] = jnp.reshape(A__ ,[jnp.shape(A__ )[0], embedding_dim] )
return signal
class UpperCamelCase_ (nn.Module ):
__magic_name__ = 32
__magic_name__ = jnp.floataa
@nn.compact
def __call__( self : int , lowerCAmelCase_ : str ) -> Any:
UpperCAmelCase_ : Dict = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = nn.silu(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(lowerCAmelCase_ )
return temb
class UpperCamelCase_ (nn.Module ):
__magic_name__ = 32
__magic_name__ = False
__magic_name__ = 1
@nn.compact
def __call__( self : Optional[Any] , lowerCAmelCase_ : Dict ) -> Optional[Any]:
return get_sinusoidal_embeddings(
lowerCAmelCase_ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
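
# Illustrative usage (not part of the original sample): embed a batch of 4
# integer timesteps into 32-dimensional sinusoidal features.
if __name__ == "__main__":
    ts = jnp.arange(4, dtype=jnp.float32)
    emb = get_sinusoidal_embeddings(ts, embedding_dim=32)
    print(emb.shape)  # (4, 32)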
"""simple docstring"""
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class UpperCamelCase_ :
__magic_name__ = None
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
UpperCAmelCase_ : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase_ : Union[str, Any] = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , lowerCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
UpperCAmelCase_ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : List[Any] = os.path.join(lowerCAmelCase_ , "feat_extract.json" )
feat_extract_first.to_json_file(lowerCAmelCase_ )
UpperCAmelCase_ : Any = self.feature_extraction_class.from_json_file(lowerCAmelCase_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
UpperCAmelCase_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : Optional[Any] = feat_extract_first.save_pretrained(lowerCAmelCase_ )[0]
check_json_file_has_correct_format(lowerCAmelCase_ )
UpperCAmelCase_ : Any = self.feature_extraction_class.from_pretrained(lowerCAmelCase_ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
UpperCAmelCase_ : Optional[int] = self.feature_extraction_class()
self.assertIsNotNone(lowerCAmelCase_ )
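
# Illustrative wiring (not part of the original sample): a concrete test case
# mixes the saving mixin into unittest.TestCase and points it at a real feature
# extractor. The Wav2Vec2 class and its init kwargs below are assumptions.
#
# import unittest
# from transformers import Wav2Vec2FeatureExtractor
#
# class Wav2Vec2FeatureExtractionSavingTest(FeatureExtractionSavingTestMixin, unittest.TestCase):
#     feature_extraction_class = Wav2Vec2FeatureExtractor
#     feat_extract_dict = {"feature_size": 1, "sampling_rate": 16000, "padding_value": 0.0}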
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class A__ ( _snake_case ):
lowercase = ["image_processor", "feature_extractor"]
lowercase = "TvltImageProcessor"
lowercase = "TvltFeatureExtractor"
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
super().__init__(image_processor=UpperCamelCase__ , feature_extractor=UpperCamelCase__ )
A_ = image_processor
A_ = feature_extractor
    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict
@property
def snake_case_ ( self ) -> List[Any]:
'''simple docstring'''
A_ = self.image_processor.model_input_names
A_ = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
'''simple docstring'''
import pytest
import datasets
# Import fixture modules as plugins
__lowerCamelCase = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> Optional[int]:
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["""integration""", """unit"""] ):
continue
item.add_marker(pytest.mark.unit )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
config.addinivalue_line("""markers""", """torchaudio_latest: mark test to run with torchaudio>=0.12""" )
@pytest.fixture(autouse=UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ ) -> int:
# test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
A_ = tmp_path_factory.getbasetemp() / """cache"""
A_ = test_hf_cache_home / """datasets"""
A_ = test_hf_cache_home / """metrics"""
A_ = test_hf_cache_home / """modules"""
monkeypatch.setattr("""datasets.config.HF_DATASETS_CACHE""", str(UpperCAmelCase__ ) )
monkeypatch.setattr("""datasets.config.HF_METRICS_CACHE""", str(UpperCAmelCase__ ) )
monkeypatch.setattr("""datasets.config.HF_MODULES_CACHE""", str(UpperCAmelCase__ ) )
A_ = test_hf_datasets_cache / """downloads"""
monkeypatch.setattr("""datasets.config.DOWNLOADED_DATASETS_PATH""", str(UpperCAmelCase__ ) )
A_ = test_hf_datasets_cache / """downloads""" / """extracted"""
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""", str(UpperCAmelCase__ ) )
@pytest.fixture(autouse=UpperCAmelCase__, scope="""session""" )
def UpperCAmelCase__ ( ) -> str:
datasets.disable_progress_bar()
@pytest.fixture(autouse=UpperCAmelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Tuple:
# don't take tests into account when counting downloads
monkeypatch.setattr("""datasets.config.HF_UPDATE_DOWNLOAD_COUNTS""", UpperCAmelCase__ )
@pytest.fixture
def UpperCAmelCase__ ( UpperCAmelCase__ ) -> Any:
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr("""sqlalchemy.util.deprecations.SILENCE_UBER_WARNING""", UpperCAmelCase__ )
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    # extended Euclidean algorithm
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
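
# Worked example (not part of the original sample): 3 * 4 = 12 ≡ 1 (mod 11),
# so the inverse of 3 mod 11 is 4; the second line checks the defining property.
if __name__ == "__main__":
    assert find_mod_inverse(3, 11) == 4
    assert (7 * find_mod_inverse(7, 26)) % 26 == 1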
'''simple docstring'''
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_lowerCamelCase : Dict = False
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : Dict = "ybelkada/fonts"
def __lowerCamelCase ( ) -> Optional[int]:
"""simple docstring"""
if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
raise ImportError(
F"""You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use """
'Pix2StructImageProcessor. Please upgrade torch.' )
def __lowerCamelCase ( A__ , A__ , A__ ) -> str:
"""simple docstring"""
requires_backends(A__ , ['torch'] )
_check_torch_version()
UpperCamelCase = image_tensor.unsqueeze(0 )
UpperCamelCase = torch.nn.functional.unfold(A__ , (patch_height, patch_width) , stride=(patch_height, patch_width) )
UpperCamelCase = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , A__ , A__ , -1 )
UpperCamelCase = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape(
image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , )
return patches.unsqueeze(0 )
def __lowerCamelCase ( A__ , A__ = 36 , A__ = "black" , A__ = "white" , A__ = 5 , A__ = 5 , A__ = 5 , A__ = 5 , A__ = None , A__ = None , ) -> Image.Image:
"""simple docstring"""
requires_backends(A__ , 'vision' )
# Add new lines so that each line is no more than 80 characters.
UpperCamelCase = textwrap.TextWrapper(width=80 )
UpperCamelCase = wrapper.wrap(text=A__ )
UpperCamelCase = '\n'.join(A__ )
if font_bytes is not None and font_path is None:
UpperCamelCase = io.BytesIO(A__ )
elif font_path is not None:
UpperCamelCase = font_path
else:
UpperCamelCase = hf_hub_download(A__ , 'Arial.TTF' )
UpperCamelCase = ImageFont.truetype(A__ , encoding='UTF-8' , size=A__ )
# Use a temporary canvas to determine the width and height in pixels when
# rendering the text.
UpperCamelCase = ImageDraw.Draw(Image.new('RGB' , (1, 1) , A__ ) )
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = temp_draw.textbbox((0, 0) , A__ , A__ )
# Create the actual image with a bit of padding around the text.
UpperCamelCase = text_width + left_padding + right_padding
UpperCamelCase = text_height + top_padding + bottom_padding
UpperCamelCase = Image.new('RGB' , (image_width, image_height) , A__ )
UpperCamelCase = ImageDraw.Draw(A__ )
draw.text(xy=(left_padding, top_padding) , text=A__ , fill=A__ , font=A__ )
return image
def __lowerCamelCase ( A__ , A__ , **A__ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(A__ , 'vision' )
# Convert to PIL image if necessary
UpperCamelCase = to_pil_image(A__ )
UpperCamelCase = render_text(A__ , **A__ )
UpperCamelCase = max(header_image.width , image.width )
UpperCamelCase = int(image.height * (new_width / image.width) )
UpperCamelCase = int(header_image.height * (new_width / header_image.width) )
UpperCamelCase = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' )
new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) )
new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) )
# Convert back to the original framework if necessary
UpperCamelCase = to_numpy_array(A__ )
if infer_channel_dimension_format(A__ ) == ChannelDimension.LAST:
UpperCamelCase = to_channel_dimension_format(A__ , ChannelDimension.LAST )
return new_image
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = ["""flattened_patches"""]
def __init__( self : Any , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : int = 2_0_4_8 , UpperCamelCase__ : bool = False , **UpperCamelCase__ : Any , ):
"""simple docstring"""
super().__init__(**UpperCamelCase__ )
UpperCamelCase = patch_size if patch_size is not None else {'height': 1_6, 'width': 1_6}
UpperCamelCase = do_normalize
UpperCamelCase = do_convert_rgb
UpperCamelCase = max_patches
UpperCamelCase = is_vqa
def A ( self : int , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : int , UpperCamelCase__ : dict , **UpperCamelCase__ : Union[str, Any] ):
"""simple docstring"""
requires_backends(self.extract_flattened_patches , 'torch' )
_check_torch_version()
# convert to torch
UpperCamelCase = to_channel_dimension_format(UpperCamelCase__ , ChannelDimension.FIRST )
UpperCamelCase = torch.from_numpy(UpperCamelCase__ )
UpperCamelCase , UpperCamelCase = patch_size['height'], patch_size['width']
UpperCamelCase , UpperCamelCase = get_image_size(UpperCamelCase__ )
# maximize scale s.t.
UpperCamelCase = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width) )
UpperCamelCase = max(min(math.floor(scale * image_height / patch_height ) , UpperCamelCase__ ) , 1 )
UpperCamelCase = max(min(math.floor(scale * image_width / patch_width ) , UpperCamelCase__ ) , 1 )
UpperCamelCase = max(num_feasible_rows * patch_height , 1 )
UpperCamelCase = max(num_feasible_cols * patch_width , 1 )
UpperCamelCase = torch.nn.functional.interpolate(
image.unsqueeze(0 ) , size=(resized_height, resized_width) , mode='bilinear' , align_corners=UpperCamelCase__ , antialias=UpperCamelCase__ , ).squeeze(0 )
# [1, rows, columns, patch_height * patch_width * image_channels]
UpperCamelCase = torch_extract_patches(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
UpperCamelCase = patches.shape
UpperCamelCase = patches_shape[1]
UpperCamelCase = patches_shape[2]
UpperCamelCase = patches_shape[3]
# [rows * columns, patch_height * patch_width * image_channels]
UpperCamelCase = patches.reshape([rows * columns, depth] )
# [rows * columns, 1]
UpperCamelCase = torch.arange(UpperCamelCase__ ).reshape([rows, 1] ).repeat(1 , UpperCamelCase__ ).reshape([rows * columns, 1] )
UpperCamelCase = torch.arange(UpperCamelCase__ ).reshape([1, columns] ).repeat(UpperCamelCase__ , 1 ).reshape([rows * columns, 1] )
# Offset by 1 so the ids do not contain zeros, which represent padding.
row_ids += 1
col_ids += 1
# Prepare additional patch features.
# [rows * columns, 1]
        UpperCamelCase = row_ids.to(torch.float32 )
        UpperCamelCase = col_ids.to(torch.float32 )
# [rows * columns, 2 + patch_height * patch_width * image_channels]
UpperCamelCase = torch.cat([row_ids, col_ids, patches] , -1 )
# [max_patches, 2 + patch_height * patch_width * image_channels]
UpperCamelCase = torch.nn.functional.pad(UpperCamelCase__ , [0, 0, 0, max_patches - (rows * columns)] ).float()
UpperCamelCase = to_numpy_array(UpperCamelCase__ )
return result
def A ( self : List[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple ):
"""simple docstring"""
        if image.dtype == np.uint8:
            UpperCamelCase = image.astype(np.float32 )
# take mean across the whole `image`
UpperCamelCase = np.mean(UpperCamelCase__ )
UpperCamelCase = np.std(UpperCamelCase__ )
UpperCamelCase = max(UpperCamelCase__ , 1.0 / math.sqrt(np.prod(image.shape ) ) )
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , **UpperCamelCase__ )
def A ( self : Union[str, Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Any , ):
"""simple docstring"""
UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCamelCase = patch_size if patch_size is not None else self.patch_size
UpperCamelCase = max_patches if max_patches is not None else self.max_patches
UpperCamelCase = self.is_vqa
if kwargs.get('data_format' , UpperCamelCase__ ) is not None:
raise ValueError('data_format is not an accepted input as the outputs are ' )
UpperCamelCase = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCamelCase = [convert_to_rgb(UpperCamelCase__ ) for image in images]
# All transformations expect numpy arrays.
UpperCamelCase = [to_numpy_array(UpperCamelCase__ ) for image in images]
if is_vqa:
if header_text is None:
raise ValueError('A header text must be provided for VQA models.' )
UpperCamelCase = kwargs.pop('font_bytes' , UpperCamelCase__ )
UpperCamelCase = kwargs.pop('font_path' , UpperCamelCase__ )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
UpperCamelCase = [header_text] * len(UpperCamelCase__ )
UpperCamelCase = [
render_header(UpperCamelCase__ , header_text[i] , font_bytes=UpperCamelCase__ , font_path=UpperCamelCase__ )
for i, image in enumerate(UpperCamelCase__ )
]
if do_normalize:
UpperCamelCase = [self.normalize(image=UpperCamelCase__ ) for image in images]
# convert to torch tensor and permute
UpperCamelCase = [
self.extract_flattened_patches(image=UpperCamelCase__ , max_patches=UpperCamelCase__ , patch_size=UpperCamelCase__ )
for image in images
]
# create attention mask in numpy
        UpperCamelCase = [(image.sum(axis=-1 ) != 0).astype(np.float32 ) for image in images]
UpperCamelCase = BatchFeature(
data={'flattened_patches': images, 'attention_mask': attention_masks} , tensor_type=UpperCamelCase__ )
return encoded_outputs
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'
"""simple docstring"""
lowercase_ = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
lowercase_ = [{"type": "code", "content": INSTALL_CONTENT}]
lowercase_ = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
__magic_name__ : int
__magic_name__ : int
class SCREAMING_SNAKE_CASE_ :
'''simple docstring'''
def __init__( self , lowerCamelCase__) -> List[str]:
'''simple docstring'''
snake_case__ : list[list[Edge]] = [[] for _ in range(lowerCamelCase__)]
snake_case__ : str = size
def __getitem__( self , lowerCamelCase__) -> Iterator[Edge]:
'''simple docstring'''
return iter(self._graph[vertex])
@property
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
return self._size
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__) -> str:
'''simple docstring'''
if weight not in (0, 1):
raise ValueError("Edge weight must be either 0 or 1.")
if to_vertex < 0 or to_vertex >= self.size:
raise ValueError("Vertex indexes must be in [0; size).")
self._graph[from_vertex].append(Edge(lowerCamelCase__ , lowerCamelCase__))
def UpperCAmelCase ( self , lowerCamelCase__ , lowerCamelCase__) -> int | None:
'''simple docstring'''
snake_case__ : int = deque([start_vertex])
snake_case__ : list[int | None] = [None] * self.size
snake_case__ : List[str] = 0
while queue:
snake_case__ : str = queue.popleft()
snake_case__ : Dict = distances[current_vertex]
if current_distance is None:
continue
for edge in self[current_vertex]:
snake_case__ : Optional[Any] = current_distance + edge.weight
snake_case__ : List[Any] = distances[edge.destination_vertex]
if (
isinstance(lowerCamelCase__ , lowerCamelCase__)
and new_distance >= dest_vertex_distance
):
continue
snake_case__ : str = new_distance
if edge.weight == 0:
queue.appendleft(edge.destination_vertex)
else:
queue.append(edge.destination_vertex)
if distances[finish_vertex] is None:
raise ValueError("No path from start_vertex to finish_vertex.")
return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
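
# Illustrative usage (not part of the original sample): a 5-vertex graph where
# the zero-weight path 0 -> 1 -> 3 makes the route to 4 cost only 1.
if __name__ == "__main__":
    g = AdjacencyList(5)
    g.add_edge(0, 1, 0)
    g.add_edge(1, 3, 0)
    g.add_edge(0, 2, 1)
    g.add_edge(2, 3, 1)
    g.add_edge(3, 4, 1)
    print(g.get_shortest_path(0, 4))  # 1, via 0 -(0)-> 1 -(0)-> 3 -(1)-> 4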
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def A__ ( _UpperCAmelCase : jnp.ndarray , _UpperCAmelCase : int , _UpperCAmelCase : float = 1 , _UpperCAmelCase : float = 1 , _UpperCAmelCase : float = 1.0e4 , _UpperCAmelCase : bool = False , _UpperCAmelCase : float = 1.0 , ) -> jnp.ndarray:
'''simple docstring'''
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even"""
snake_case__ : List[Any] = float(embedding_dim // 2 )
snake_case__ : Tuple = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
snake_case__ : Any = min_timescale * jnp.exp(jnp.arange(_UpperCAmelCase , dtype=jnp.floataa ) * -log_timescale_increment )
snake_case__ : Optional[int] = jnp.expand_dims(_UpperCAmelCase , 1 ) * jnp.expand_dims(_UpperCAmelCase , 0 )
# scale embeddings
snake_case__ : List[Any] = scale * emb
if flip_sin_to_cos:
snake_case__ : Any = jnp.concatenate([jnp.cos(_UpperCAmelCase ), jnp.sin(_UpperCAmelCase )] , axis=1 )
else:
snake_case__ : Optional[int] = jnp.concatenate([jnp.sin(_UpperCAmelCase ), jnp.cos(_UpperCAmelCase )] , axis=1 )
snake_case__ : Any = jnp.reshape(_UpperCAmelCase , [jnp.shape(_UpperCAmelCase )[0], embedding_dim] )
return signal
class SCREAMING_SNAKE_CASE_ ( nn.Module):
'''simple docstring'''
__magic_name__ : int = 32
__magic_name__ : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self , lowerCamelCase__) -> Optional[Any]:
'''simple docstring'''
snake_case__ : str = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1")(lowerCamelCase__)
snake_case__ : Optional[Any] = nn.silu(lowerCamelCase__)
snake_case__ : Union[str, Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2")(lowerCamelCase__)
return temb
class SCREAMING_SNAKE_CASE_ ( nn.Module):
'''simple docstring'''
__magic_name__ : int = 32
__magic_name__ : bool = False
__magic_name__ : float = 1
@nn.compact
def __call__( self , lowerCamelCase__) -> str:
'''simple docstring'''
return get_sinusoidal_embeddings(
lowerCamelCase__ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift)
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( SCREAMING_SNAKE_CASE_ ):
a_ : Union[str, Any] = ['image_processor', 'tokenizer']
a_ : List[Any] = 'CLIPImageProcessor'
a_ : Tuple = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : Tuple , SCREAMING_SNAKE_CASE__ : List[str]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , **SCREAMING_SNAKE_CASE__ : Tuple ):
lowerCamelCase__ = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , SCREAMING_SNAKE_CASE__ , )
lowerCamelCase__ = kwargs.pop('feature_extractor' )
lowerCamelCase__ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
def _UpperCamelCase ( self : List[Any] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : List[Any] ):
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : str , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : str ):
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
@property
def _UpperCamelCase ( self : Union[str, Any] ):
lowerCamelCase__ = self.tokenizer.model_input_names
lowerCamelCase__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _UpperCamelCase ( self : List[str] ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , SCREAMING_SNAKE_CASE__ , )
return self.image_processor_class
@property
def _UpperCamelCase ( self : str ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , SCREAMING_SNAKE_CASE__ , )
return self.image_processor
"""simple docstring"""
class _a :
def __init__( self : List[str] , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = size
lowerCamelCase__ = [0] * size
lowerCamelCase__ = [0] * size
@staticmethod
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int ):
return index | (index + 1)
@staticmethod
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int ):
return (index & (index + 1)) - 1
def _UpperCamelCase ( self : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
lowerCamelCase__ = value
while index < self.size:
lowerCamelCase__ = self.get_prev(SCREAMING_SNAKE_CASE__ ) + 1
if current_left_border == index:
lowerCamelCase__ = value
else:
lowerCamelCase__ = max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCamelCase__ = self.get_next(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
right -= 1 # Because of right is exclusive
lowerCamelCase__ = 0
while left <= right:
lowerCamelCase__ = self.get_prev(SCREAMING_SNAKE_CASE__ )
if left <= current_left:
lowerCamelCase__ = max(SCREAMING_SNAKE_CASE__ , self.tree[right] )
lowerCamelCase__ = current_left
else:
lowerCamelCase__ = max(SCREAMING_SNAKE_CASE__ , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
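
# Illustrative usage (not part of the original sample): point updates followed
# by range-max queries; intervals are half-open [left, right).
if __name__ == "__main__":
    tree = MaxFenwickTree(8)
    tree.update(2, 5)
    tree.update(5, 9)
    tree.update(6, 3)
    print(tree.query(0, 8))  # 9
    print(tree.query(0, 5))  # 5
    print(tree.query(6, 8))  # 3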
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ : Tuple = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[str] = ['ConditionalDetrFeatureExtractor']
UpperCAmelCase_ : Union[str, Any] = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Optional[Any] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def A_ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ):
"""simple docstring"""
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
_lowerCamelCase : List[str] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : Union[str, Any] = torch.permute(_lowerCAmelCase , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCAmelCase ):
# linear layer
_lowerCamelCase : Union[str, Any] = flax_key_tuple[:-1] + ("weight",)
_lowerCamelCase : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_lowerCamelCase : Tuple = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
def A_ ( _lowerCAmelCase : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Any ):
"""simple docstring"""
if "metadata" in layer:
_lowerCamelCase : Optional[int] = layer.split("metadata" )
_lowerCamelCase : Union[str, Any] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : Dict = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
_lowerCamelCase : List[str] = layer.split("kvstore" )
_lowerCamelCase : Optional[int] = "".join(split_layer[0] )[:-1]
_lowerCamelCase : List[Any] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
_lowerCamelCase : Tuple = layer.split("/" )
_lowerCamelCase : int = "/".join(split_layer[:-1] )
_lowerCamelCase : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
_lowerCamelCase : int = F'{switch_checkpoint_path}/{checkpoint_info[layer]}'
elif "kvstore/driver" in layer:
_lowerCamelCase : Optional[int] = "file"
else:
_lowerCamelCase : str = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def A_ ( _lowerCAmelCase : Tuple , _lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
_lowerCamelCase : str = rename_keys(_lowerCAmelCase )
_lowerCamelCase : Dict = {}
for k, v in current_block.items():
_lowerCamelCase : Union[str, Any] = v
_lowerCamelCase : str = new_current_block
torch.save(_lowerCAmelCase , _lowerCAmelCase )
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    """Read a T5X/TensorStore checkpoint layer by layer and write sharded PyTorch checkpoints."""
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts) + 1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts) + 1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx + 1:05d}-of-???.bin"))
        # the temporary "-of-???" shard names can now be renamed with the final shard count
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index
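
# Hedged sketch of the on-disk result (shard count is illustrative; names assume
# the default weights_name, i.e. WEIGHTS_NAME == "pytorch_model.bin"):
#
#     pytorch_model-00001-of-00003.bin
#     pytorch_model-00002-of-00003.bin
#     pytorch_model-00003-of-00003.bin
#     pytorch_model.bin.index.json  # {"metadata": {"total_size": ...}, "weight_map": {...}}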
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--switch_t5x_checkpoint_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
        type=str,
        required=False,
        help='Path to a directory containing a folder per layer. Follows the original Google format.',
    )
    parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
    parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
        type=str,
        required=False,
        help='Path to the output pytorch model.',
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    """Reload the converted model and run a short generation to verify the conversion."""
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
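
# Hedged usage sketch (assuming this script is saved as convert_big_switch.py;
# all paths are placeholders):
#
#     python convert_big_switch.py \
#         --switch_t5x_checkpoint_path /path/to/checkpoint_634600 \
#         --pytorch_dump_folder_path /path/to/output \
#         --max_shard_size 10GB \
#         --dtype bfloat16
#
# sanity_check() can then be called from an interactive session to spot-check generation.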